Llamea in Nevergrad #1617

Open · wants to merge 6 commits into base: main
49 changes: 30 additions & 19 deletions nevergrad/benchmark/experiments.py
@@ -61,21 +61,26 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
# ])]
return [
# "BigLognormalDiscreteOnePlusOne",
# "DiscreteLenglerOnePlusOne",
# "NgLn",
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
"XSmallLognormalDiscreteOnePlusOne",
"MultiLN",
"NgRS",
"NgIohRS",
"NgIohMLn",
"NgIohLn",
# "LognormalDiscreteOnePlusOne",
# "HugeLognormalDiscreteOnePlusOne",
]
lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + (["NgIohTuned"] * 5) + [o for o in list(ng.optimizers.registry.keys()) if "LAMA" in o]
optims = [o for o in ng.optimizers.registry.keys() if "LAMA" in o]
lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + (["NgIohTuned"] * 10) + [o for o in optims if any([(x in o) for x in ["ADEM", "ptiveHarmonySearch", "CMAESDE","bridDEPSOWithDyn", "CMA","ERADS_Q","EnhancedDynamicPrec","hancedFirew","QPSO","QuantumDifferentialPart"]])]
return list(np.random.choice(lama, 55))
# "BigLognormalDiscreteOnePlusOne",
# "DiscreteLenglerOnePlusOne",
# "NgLn",
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
if False:
return [
"XSmallLognormalDiscreteOnePlusOne",
"MultiLN",
"NgRS",
"NgIohRS",
"NgIohMLn",
"NgIohLn",
# "LognormalDiscreteOnePlusOne",
# "HugeLognormalDiscreteOnePlusOne",
]
# return ["CSEC11"]
# return [np.random.choice(["CSEC11", "SQOPSODCMA", "NgIoh4", "NGOpt"])]
# return ["LPCMA"] #return [np.random.choice(["CSEC10", "DSproba", "NgIoh4", "DSbase", "DS3p", "DSsubspace"])]
@@ -739,6 +744,7 @@ def keras_tuning(
optims = refactor_optims(optims)
datasets = ["kerasBoston", "diabetes", "auto-mpg", "red-wine", "white-wine"]
optims = refactor_optims(optims)
optims = ["NgIohTuned"]
for dimension in [None]:
for dataset in datasets:
function = MLTuning(
Expand Down Expand Up @@ -1691,15 +1697,15 @@ def yabbob(
for name in names
for rotation in [True, False]
for num_blocks in ([1] if not split else [7, 12])
for d in (
for d in (
[100, 1000, 3000]
if hd
else (
[2, 5, 10, 15]
if tuning
else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 10, 50]))
else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 5, 10, 50])) # added 5 for lama stuff
)
)
)
]

assert reduction_factor in [1, 7, 13, 17] # needs to be a cofactor
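
The nested conditional in the hunk above selects the dimension sweep from the flags; the change adds dimension 5 to the default sweep (all flags off), per the "added 5 for lama stuff" comment. Evaluating the new expression standalone for that case:

hd, tuning, bounded, noise = False, False, False, False
dims = (
    [100, 1000, 3000]
    if hd
    else ([2, 5, 10, 15] if tuning else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 5, 10, 50])))
)
print(dims)  # [2, 5, 10, 50]
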
@@ -1767,6 +1773,8 @@ def f(x):
if bounded:
budgets = [10, 20, 40, 100, 300]
optims = refactor_optims(optims)
if hd or big:
optims = [np.random.choice(optims)]
for optim in optims:
for function in functions:
for budget in budgets:
@@ -2121,6 +2129,7 @@ def zp_ms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
yield Experiment(function, optim, budget=budget, num_workers=nw, seed=next(seedg))


@registry.register
def nozp_noms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
"""Testing optimizers on exponentiated problems.
Cigar, Ellipsoid.
@@ -3852,7 +3861,9 @@ def lsgo() -> tp.Iterator[Experiment]:
optims = ["DiagonalCMA", "TinyQODE", "OpoDE", "OpoTinyDE"]
optims = ["TinyQODE", "OpoDE", "OpoTinyDE"]
optims = refactor_optims(optims)
for i in range(1, 16): # [np.random.choice(list(range(1, 16)))]:
optims = [np.random.choice(optims)]
optims = ["NgIohTuned"]
for i in [np.random.choice(list(range(1, 16)))]: # [np.random.choice(list(range(1, 16)))]:
for optim in optims:
for budget in [120000, 600000, 3000000]:
yield Experiment(lsgo_makefunction(i).instrumented(), optim, budget=budget)
16 changes: 15 additions & 1 deletion nevergrad/benchmark/plotting.py
@@ -136,6 +136,17 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector:
df = utils.Selector(df)
if "error" not in df.columns: # backward compatibility
return df # type: ignore
dropped = []
non_dropped = 0
for index, row in df.iterrows():
try:
if np.isnan(row["loss"]):
pass
non_dropped += 1
except:
dropped += [index]
print(f"Dropped: {len(dropped)}, Non-dropped: {non_dropped}")
df.drop(dropped, inplace=True)
# errors with no recommendation
nandf = df.select(loss=np.isnan)
for row in nandf.itertuples():
@@ -153,7 +164,10 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector:
err_inds = set(nandf.index)
output = df.loc[[i for i in df.index if i not in err_inds], [c for c in df.columns if c != "error"]]
# cast nans in loss to infinity
df.loc[np.isnan(df.loss), "loss"] = float("inf")
try:
df.loc[np.isnan(df.loss), "loss"] = float("inf")
except Exception as e:
print(f"pb with isnan(loss): {e}")
#
assert (
not output.loc[:, "loss"].isnull().values.any()
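
Context for the new try/except around np.isnan: when a benchmark run crashes, the loss column can end up with object dtype (floats mixed with error strings), and np.isnan then raises a TypeError instead of returning a mask. A small illustration of the failure mode the guard presumably targets, with toy data not from the PR:

import numpy as np
import pandas as pd

df = pd.DataFrame({"loss": [1.0, float("nan"), "CrashError"]})  # object-dtype column
try:
    np.isnan(df.loss)
except TypeError as e:
    print(f"pb with isnan(loss): {e}")
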
75 changes: 75 additions & 0 deletions nevergrad/optimization/lama/AADCCS.py
@@ -0,0 +1,75 @@
import numpy as np


class AADCCS:
def __init__(
self,
budget,
dimension=5,
lower_bound=-5.0,
upper_bound=5.0,
population_size=150,
F_base=0.5,
CR_base=0.8,
learning_rate=0.1,
p=0.25,
):
self.budget = budget
self.dimension = dimension
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.population_size = population_size
self.F_base = F_base # Initial mutation factor
self.CR_base = CR_base # Initial crossover probability
self.learning_rate = learning_rate # Learning rate for adaptive parameters
self.p = p # Probability of using best individual updates

def __call__(self, func):
# Initialize population and fitness
population = np.random.uniform(
self.lower_bound, self.upper_bound, (self.population_size, self.dimension)
)
fitness = np.array([func(ind) for ind in population])
evaluations = self.population_size

best_index = np.argmin(fitness)
best_individual = population[best_index]
best_fitness = fitness[best_index]

# Adaptive mutation and crossover probabilities
F_adaptive = np.full(self.population_size, self.F_base)
CR_adaptive = np.full(self.population_size, self.CR_base)

while evaluations < self.budget:
for i in range(self.population_size):
if evaluations >= self.budget:
break

# Choose different indices for mutation, ensuring all are unique
indices = np.random.choice(self.population_size, 4, replace=False)
a, b, c, d = population[indices]

# Mutation with best individual influence
if np.random.rand() < self.p:
a = best_individual # Using best individual to guide mutation

# Differential mutation and crossover
mutant = np.clip(a + F_adaptive[i] * ((b - c) + (a - d)), self.lower_bound, self.upper_bound)
trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i])
trial_fitness = func(trial)
evaluations += 1

# Selection and adaptivity update
if trial_fitness < fitness[i]:
population[i], fitness[i] = trial, trial_fitness
if trial_fitness < best_fitness:
best_fitness, best_individual = trial_fitness, trial.copy()
# Adaptive factor update towards successful mutations
F_adaptive[i] = max(0.1, F_adaptive[i] + self.learning_rate * (1.0 - F_adaptive[i]))
CR_adaptive[i] = min(1.0, CR_adaptive[i] - self.learning_rate * CR_adaptive[i])
else:
# Adaptive factor degradation towards unsuccessful mutations
F_adaptive[i] = max(0.1, F_adaptive[i] - self.learning_rate * F_adaptive[i])
CR_adaptive[i] = min(1.0, CR_adaptive[i] + self.learning_rate * (1.0 - CR_adaptive[i]))

return best_fitness, best_individual
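
The lama classes added by this PR share one calling convention: construct with a budget (plus optional hyperparameters), call the instance on an objective, and get (best_fitness, best_individual) back. A hypothetical smoke test, assuming the PR's files are importable; the sphere objective is invented for illustration:

import numpy as np
from nevergrad.optimization.lama.AADCCS import AADCCS

def sphere(x):
    # Simple test objective; the lama classes default to dimension=5 on [-5, 5].
    return float(np.sum(np.asarray(x) ** 2))

optimizer = AADCCS(budget=2000)
best_fitness, best_individual = optimizer(sphere)
print(best_fitness, best_individual)
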
83 changes: 83 additions & 0 deletions nevergrad/optimization/lama/AADEHLS.py
@@ -0,0 +1,83 @@
import numpy as np


class AADEHLS:
def __init__(self, budget, population_size=50, F_init=0.5, CR_init=0.9):
self.budget = budget
self.CR_init = CR_init
self.F_init = F_init
self.population_size = population_size
self.dimension = 5
self.lower_bound = -5.0
self.upper_bound = 5.0

def opposite_point(self, x):
return self.lower_bound + self.upper_bound - x

def __call__(self, func):
# Initialize population with Opposition-Based Learning
population = np.random.uniform(
self.lower_bound, self.upper_bound, (self.population_size, self.dimension)
)
opposite_population = self.opposite_point(population)
combined_population = np.vstack((population, opposite_population))
fitness = np.array([func(ind) for ind in combined_population])
indices = np.argsort(fitness)
population = combined_population[indices[: self.population_size]]
fitness = fitness[indices[: self.population_size]]

best_idx = np.argmin(fitness)
best_solution = population[best_idx]
best_fitness = fitness[best_idx]

F = self.F_init
CR = self.CR_init
successful_F = []
successful_CR = []

evaluations = self.population_size * 2
while evaluations < self.budget:
for i in range(self.population_size):
idxs = [idx for idx in range(self.population_size) if idx != i]
a, b, c = np.random.choice(idxs, 3, replace=False)
mutant = population[a] + F * (population[b] - population[c])
mutant = np.clip(mutant, self.lower_bound, self.upper_bound)

cross_points = np.random.rand(self.dimension) < CR
trial = np.where(cross_points, mutant, population[i])

trial_fitness = func(trial)
evaluations += 1

if trial_fitness < fitness[i]:
population[i] = trial
fitness[i] = trial_fitness
successful_F.append(F)
successful_CR.append(CR)
if trial_fitness < best_fitness:
best_solution = trial
best_fitness = trial_fitness

if evaluations >= self.budget:
break

# Update F and CR adaptively based on successes
if successful_F:
F = np.mean(successful_F)
CR = np.mean(successful_CR)

# Enhanced hybrid local search phase
local_best = best_solution.copy()
for _ in range(10):
perturbation = np.random.normal(0, 0.1, self.dimension)
local_trial = np.clip(local_best + perturbation, self.lower_bound, self.upper_bound)
local_fitness = func(local_trial)
evaluations += 1

if local_fitness < best_fitness:
best_solution = local_trial
best_fitness = local_fitness
if evaluations >= self.budget:
break

return best_fitness, best_solution
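
AADEHLS seeds its search with opposition-based learning: every initial point x is mirrored to lower_bound + upper_bound - x, and only the better half of the combined set survives. A toy check of the mirroring, with invented values; for these bounds lb + ub = 0, so the opposite point is simply -x:

import numpy as np

lb, ub = -5.0, 5.0
x = np.array([1.5, -4.0, 2.5])
x_opp = lb + ub - x  # mirror through the midpoint of the box
print(x_opp)  # [-1.5  4.  -2.5]
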
77 changes: 77 additions & 0 deletions nevergrad/optimization/lama/AADMEM.py
@@ -0,0 +1,77 @@
import numpy as np


class AADMEM:
def __init__(
self, budget, population_size=50, crossover_rate=0.9, F_base=0.5, F_amp=0.3, memory_size=100
):
self.budget = budget
self.population_size = population_size
self.crossover_rate = crossover_rate
self.F_base = F_base
self.F_amp = F_amp
self.memory_size = memory_size

def __call__(self, func):
lb = -5.0
ub = 5.0
dimension = 5

# Initialize population uniformly within bounds
population = lb + (ub - lb) * np.random.rand(self.population_size, dimension)
fitness = np.array([func(individual) for individual in population])

# Memory to store good solutions
memory = np.empty((0, dimension))

# Best solution tracking
best_idx = np.argmin(fitness)
best_solution = population[best_idx]
best_fitness = fitness[best_idx]

evaluations = self.population_size

while evaluations < self.budget:
for i in range(self.population_size):
# Adaptive mutation factor that changes over time to increase exploration
F = self.F_base + self.F_amp * np.sin(np.pi * evaluations / self.budget)

# Mutation vectors from population and occasionally from memory
idxs = [idx for idx in range(self.population_size) if idx != i]
a, b, c = population[np.random.choice(idxs, 3, replace=False)]
if memory.shape[0] > 0 and np.random.rand() < 0.1: # 10% chance to use memory
a = memory[np.random.randint(0, memory.shape[0])]

mutant = np.clip(a + F * (b - c), lb, ub)

# Crossover
cross_points = np.random.rand(dimension) < self.crossover_rate
if not np.any(cross_points):
cross_points[np.random.randint(0, dimension)] = True
trial = np.where(cross_points, mutant, population[i])

# Selection
trial_fitness = func(trial)
evaluations += 1
if trial_fitness < fitness[i]:
# Update memory with the old solution
if memory.shape[0] < self.memory_size:
memory = np.vstack([memory, population[i]])
else:
# Replace a random entry in memory
memory[np.random.randint(0, self.memory_size)] = population[i]

# Update population with the new better solution
population[i] = trial
fitness[i] = trial_fitness

# Update the best solution
if trial_fitness < best_fitness:
best_solution = trial
best_fitness = trial_fitness

# Check if budget exhausted
if evaluations >= self.budget:
break

return best_fitness, best_solution
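
Unlike the per-individual adaptation in AADCCS, AADMEM's mutation factor follows a fixed half-sine schedule, F = F_base + F_amp * sin(pi * t / budget): it rises from 0.5 toward 0.8 at mid-budget and falls back near 0.5 at the end. Evaluating the same formula standalone at the endpoints and midpoint:

import numpy as np

F_base, F_amp, budget = 0.5, 0.3, 1000
for t in (0, 500, 1000):
    print(t, F_base + F_amp * np.sin(np.pi * t / budget))
# 0 -> 0.5, 500 -> 0.8, 1000 -> 0.5 (up to float rounding of sin(pi))
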