Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

metadata = dict(
name="hiopbbpy",
version="0.0.4",
version="0.0.5",
description="HiOp black box optimization (hiopbbpy)",
author="Tucker hartland et al.",
author_email="hartland1@llnl.gov",
Expand Down
3 changes: 0 additions & 3 deletions src/Drivers/hiopbbpy/BODriverCI.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,6 @@
max_obj[prob_type][acq_type] = max(max_obj[prob_type][acq_type], y_opt[prob_type][acq_type][n_repeat])
min_obj[prob_type][acq_type] = min(min_obj[prob_type][acq_type], y_opt[prob_type][acq_type][n_repeat])

#if(num_repeat >= 1000 ):
# np.save("yopt_20iter_1000run.npy", y_opt)

# Define percentiles
left_percentile = 1 # 5 or 1
right_percentile = 100 - left_percentile # 95 or 99
Expand Down
5 changes: 3 additions & 2 deletions src/Drivers/hiopbbpy/BODriverEX.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,9 @@ def con_jac_ineq(x):
for prob_type in prob_type_l:
print()
if prob_type == "LpNorm":
problem = LpNormProblem(nx, xlimits, constraints=None)
problem = LpNormProblem(nx, xlimits)
else:
problem = BraninProblem(constraints=None)
problem = BraninProblem()
problem.set_constraints(user_constraint)
Comment on lines +60 to 63
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have two ways to add the constraints:

  1. in the constructor, setting constraints=user_constraint
    or
  2. using function set_constraints.
    I should leave a comment in this example.

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I understand that there are two methods to set constraints. However, passing None as the constraints to BraninProblem is equivalent to instantiating a BraninProblem with no arguments since the default constraints argument of BraninProblem is None. Or at least it was until I recently changed the default to [ ].


for acq_type in acq_type_l:
Expand All @@ -78,6 +78,7 @@ def con_jac_ineq(x):
'acquisition_type': acq_type,
'bo_maxiter': 10,
'opt_solver': 'IPOPT', #"SLSQP" "IPOPT"
'batch_size': 1,
'solver_options': {
'max_iter': 200,
'print_level': 1
Expand Down
2 changes: 1 addition & 1 deletion src/Drivers/hiopbbpy/LpNormProblem.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from hiopbbpy.problems.problem import Problem

class LpNormProblem(Problem):
def __init__(self, ndim, xlimits, p=2.0, constraints=None):
    """Lp-norm benchmark problem of order `p` on the box `xlimits`.

    Parameters
    ----------
    ndim : int
        Number of design variables.
    xlimits : array-like, shape (ndim, 2)
        Lower/upper bounds for each variable (validated by the base class).
    p : float, optional
        Order of the norm (default 2.0).
    constraints : sequence, optional
        Problem constraints. `None` (the default) means no constraints.
        A `None` sentinel is used instead of a `[]` default so that every
        instance gets a fresh list (mutable default arguments are shared
        across all calls in Python).
    """
    name = "LpNormProblem"
    # Normalize the sentinel to an empty list before delegating, so the
    # base-class `Sequence` validation still holds.
    super().__init__(ndim, xlimits, name=name,
                     constraints=[] if constraints is None else constraints)
    self.p = p
Expand Down
103 changes: 67 additions & 36 deletions src/hiopbbpy/opt/boalgorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,20 @@
from .acquisition import LCBacquisition, EIacquisition
from ..problems.problem import Problem
from .optproblem import IpoptProb
from smt.applications.ego import Evaluator

# A base class defining a general framework for Bayesian Optimization
class BOAlgorithmBase:
def __init__(self):
self.acquisition_type = "LCB" # Type of acquisition function (default = "LCB")
self.batch_type = "KB" # strategy for qEI
self.xtrain = None # Training data
self.ytrain = None # Training data
self.prob = None # Problem structure
self.evaluator = Evaluator() # compute control objective evaluations
self.bo_maxiter = 20 # Maximum number of Bayesian optimization steps
self.n_start = 10 # estimating acquisition global optima by determining local optima n_start times and then determining the discrete max of that set
self.q = 1 # batch size
self.batch_size = 1 # batch size
# save some internal member train
self.y_hist = None # History of evaluations
self.x_hist = None # History of evaluations
Expand All @@ -33,9 +36,9 @@ def __init__(self):
self.idx_opt = None # Index of the best observed value in the history

# Sets the acquisition function type and batch size
def setAcquisitionType(self, acquisition_type, q=1):
def setAcquisitionType(self, acquisition_type, batch_size=1):
self.acquisition_type = acquisition_type
self.q = q
self.batch_size = batch_size

# Sets the training data
def setTrainingData(self, xtrain, ytrain):
Expand Down Expand Up @@ -66,7 +69,7 @@ def getOptimalObjective(self):
class BOAlgorithm(BOAlgorithmBase):
def __init__(self, prob:Problem, gpsurrogate:GaussianProcess, xtrain, ytrain,
user_grad = None,
options = None):
options = {}):
super().__init__()

assert isinstance(gpsurrogate, GaussianProcess)
Expand All @@ -77,21 +80,21 @@ def __init__(self, prob:Problem, gpsurrogate:GaussianProcess, xtrain, ytrain,
self.bounds = self.gpsurrogate.get_bounds()
self.fun_grad = None

if options and 'bo_maxiter' in options:
self.bo_maxiter = options['bo_maxiter']
assert self.bo_maxiter > 0, f"Invalid bo_maxiter: {self.bo_maxiter }"
self.bo_maxiter = options.get('bo_maxiter', self.bo_maxiter)
assert self.bo_maxiter > 0, f"Invalid bo_maxiter: {self.bo_maxiter }"

self.solver_options = {"maxiter": 200}
self.solver_options = options.get('solver_options', self.solver_options)

if options and 'solver_options' in options:
self.solver_options = options['solver_options']
else:
self.solver_options = {"maxiter": 200}
acquisition_type = options.get('acquisition_type', "LCB")
assert acquisition_type in ["LCB", "EI"], f"Invalid acquisition_type: {acquisition_type}"
batch_size = options.get('batch_size', 1)
assert isinstance(batch_size, int), f"batch_size {batch_size} not an integer"
assert batch_size > 0, f"batch_size {batch_size} is not strictly positive"
self.setAcquisitionType(acquisition_type, batch_size)

if options and 'acquisition_type' in options:
acquisition_type = options['acquisition_type']
assert acquisition_type in ["LCB", "EI"], f"Invalid acquisition_type: {acquisition_type}"
else:
acquisition_type = "LCB"
self.setAcquisitionType(acquisition_type)
self.evaluator = options.get('evaluator', self.evaluator)
assert isinstance(self.evaluator, Evaluator)

if options and 'opt_solver' in options:
opt_solver = options['opt_solver']
Expand All @@ -100,6 +103,7 @@ def __init__(self, prob:Problem, gpsurrogate:GaussianProcess, xtrain, ytrain,
opt_solver = "SLSQP"
self.set_method(opt_solver)


if user_grad:
self.fun_grad = user_grad

Expand Down Expand Up @@ -150,6 +154,22 @@ def _find_best_point(self, x_train, y_train, x0 = None):
best_xopt = x_all[np.argmin(np.array(y_all))]

return best_xopt

def _get_virtual_point(self, x):
    """Return a surrogate-based "virtual" observation at point(s) `x`.

    Used by batch (multi-point) acquisition: points already chosen within
    the current batch are assigned virtual objective values so the next
    batch member can be selected before any true evaluation happens.

    Strategies, selected by ``self.batch_type``:
      - "CLmin"  : constant liar -- the minimum training objective seen so far.
      - "KB"     : Kriging believer -- the surrogate mean.
      - "KBUB"   : Kriging believer upper bound (mean + 3 std).
      - "KBLB"   : Kriging believer lower bound (mean - 3 std).
      - "KBRand" : Kriging believer with a random N(0,1) multiplier.

    Raises
    ------
    NotImplementedError
        If ``self.batch_type`` is not one of the strategies above.
    """
    if self.batch_type == "CLmin":
        # Constant liar: pretend the new point attains the best value so far.
        return min(self.gpsurrogate.training_y)

    # Kriging-believer variants differ only in how many standard
    # deviations are added to the surrogate mean.
    kb_beta = {"KB": 0.0, "KBUB": 3.0, "KBLB": -3.0}
    if self.batch_type in kb_beta:
        beta = kb_beta[self.batch_type]
    elif self.batch_type == "KBRand":
        beta = np.random.randn()
    else:
        # Fixed: the original message lacked a space before the value,
        # producing e.g. "...associated toKB".
        raise NotImplementedError(
            "No implemented batch_type associated to " + self.batch_type)
    return self.gpsurrogate.mean(x) + beta * np.sqrt(self.gpsurrogate.variance(x))

# Set the optimization method
def set_method(self, method):
Expand All @@ -164,8 +184,7 @@ def optimize(self):
x_train = self.xtrain
y_train = self.ytrain

n_init_sample = np.size(x_train,0)
print(f"n_init_sample: {n_init_sample}")
n_init_sample = np.size(x_train, 0)
self._setup_acqf_minimizer_callback()

self.x_hist = []
Expand All @@ -175,31 +194,43 @@ def optimize(self):
print(f"*****************************")
print(f"Iteration {i+1}/{self.bo_maxiter}")

# Get a new sample point
x_new = self._find_best_point(x_train, y_train)

# Evaluate the new sample point
y_new = self.prob.evaluate(np.atleast_2d(x_new))

# Update training set
x_train = np.vstack([x_train, x_new])
y_train_virtual = y_train.copy() # old training + batch_size num of virtual points
for j in range(self.batch_size):
# Get a new sample point
x_new = self._find_best_point(x_train, y_train_virtual)

# Update training sample points
x_train = np.vstack([x_train, x_new ])

# if this is not the last point in the current batch
# then obtain a virtual point
if j < max(range(self.batch_size)):
# Get a virtual point
y_virtual = self._get_virtual_point(np.atleast_2d(x_new))

# Update training set with the virtual point
y_train_virtual = np.vstack([y_train_virtual, y_virtual])

y_new = self.evaluator.run(self.prob.evaluate, x_train[-self.batch_size:])
y_train = np.vstack([y_train, y_new])

# Save the new sample points and objective evaluations
for j in range(1, self.batch_size+1):
self.x_hist.append(x_train[-j].flatten())
self.y_hist.append(y_train[-j].flatten())
if self.batch_size == 1:
print(f"Sample point X: {x_train[-self.batch_size:]}, Observation Y: {y_new}")
else:
print(f"Sample points X: {x_train[-self.batch_size:]}, Observations Y: {y_new}")

# Save the new sample point and its observation
self.x_hist.append(x_new)
self.y_hist.append(y_new)

print(f"Sampled point X: {x_new.flatten()}, Observation Y: {y_new.flatten()}")

# Save the optimal results and all the training data
self.idx_opt = np.argmin(self.y_hist)
self.x_opt = self.y_hist[self.idx_opt]
self.x_opt = self.x_hist[self.idx_opt]
self.y_opt = self.y_hist[self.idx_opt]
self.setTrainingData(x_train, y_train)

print()
print()
print(f"Optimal at BO iteration: {self.idx_opt+1} ")
print(f"\n\nOptimal at BO iteration: {self.idx_opt+1} ")
#if self.idx_opt < n_init_sample:
# print(f"Optimal at initial sample: {self.idx_opt+1}")
#else:
Expand Down
2 changes: 1 addition & 1 deletion src/hiopbbpy/problems/BraninProblem.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

# define the Branin problem class
class BraninProblem(Problem):
def __init__(self, constraints=None):
def __init__(self, constraints=[]):
ndim = 2
xlimits = np.array([[-5.0, 10], [0.0, 15]])
name = 'Branin'
Expand Down
10 changes: 5 additions & 5 deletions src/hiopbbpy/problems/problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,19 @@
Nai-Yuan Chiang <chiang7@llnl.gov>
"""
import numpy as np
import collections.abc
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is collections a new dependency?

Copy link
Copy Markdown
Collaborator Author

@thartland thartland Apr 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

collections is a built in module that is not pip installable. See here.

from numpy.random import uniform
from scipy.stats import qmc

# define the general optimization problem class
class Problem:
def __init__(self, ndim, xlimits, name=None, constraints=None):
def __init__(self, ndim, xlimits, name=" ", constraints=[]):
self.ndim = ndim
self.xlimits = xlimits
assert self.xlimits.shape[0] == ndim
if name is None:
self.name = " "
else:
self.name = name
assert isinstance(name, str)
assert isinstance(constraints, collections.abc.Sequence)
self.name = name
self.sampler = qmc.LatinHypercube(ndim)
self.constraints = constraints

Expand Down
Loading