Skip to content

Commit eebe855

Browse files
nychiang and thartland authored
Add EI and Branin example in (#715)
* create branch * multiplying the EI acquisition function since we are minimizing it * minor fix --------- Co-authored-by: Tucker Hartland <tucker.hartland@gmail.com>
1 parent ff0686b commit eebe855

File tree

11 files changed

+137
-16
lines changed

11 files changed

+137
-16
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
[build-system]
2-
requires = ["setuptools", "wheel", "numpy"]
2+
requires = ["setuptools", "wheel", "numpy", "smt", "cyipopt"]
33
build-backend = "setuptools.build_meta"

setup.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,14 @@
1212

1313
metadata = dict(
1414
name="hiopbbpy",
15-
version="0.0.1",
15+
version="0.0.2",
1616
description="HiOp black box optimization (hiopbbpy)",
1717
author="Tucker hartland et al.",
1818
author_email="hartland1@llnl.gov",
1919
license="BSD-3",
2020
packages=find_packages(where="src"),
2121
package_dir={"": "src"},
22-
install_requires=["smt"],
22+
install_requires=["smt", "cyipopt"],
2323
python_requires=">=3.9",
2424
zip_safe=False,
2525
url="https://github.com/LLNL/hiop",

src/Drivers/hiopbbpy/BODriver.py

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from LpNormProblem import LpNormProblem
1515
from hiopbbpy.surrogate_modeling import smtKRG
1616
from hiopbbpy.opt import BOAlgorithm
17+
from hiopbbpy.problems import BraninProblem
1718

1819

1920
### parameters
@@ -23,7 +24,9 @@
2324
nx = 2 # dimension of the problem
2425
xlimits = np.array([[-5, 5], [-5, 5]]) # bounds on optimization variable
2526

26-
problem = LpNormProblem(nx, xlimits)
27+
#problem = LpNormProblem(nx, xlimits)
28+
problem = BraninProblem()
29+
2730
print(problem.name, " problem")
2831

2932
### initial training set
@@ -34,9 +37,14 @@
3437
gp_model = smtKRG(theta, xlimits, nx)
3538
gp_model.train(x_train, y_train)
3639

37-
# Instantiate and run Bayesian Optimization
38-
bo = BOAlgorithm(gp_model, x_train, y_train)
39-
bo.optimize(problem)
40-
41-
# Retrieve optimal point
42-
x_opt, y_opt = bo.getOptimalPoint()
40+
acquisition_types = ["LCB", "EI"]
41+
for acquisition_type in acquisition_types:
42+
print("acquisition type: ", acquisition_type)
43+
44+
# Instantiate and run Bayesian Optimization
45+
bo = BOAlgorithm(gp_model, x_train, y_train, acquisition_type = acquisition_type) #EI or LCB
46+
bo.optimize(problem)
47+
48+
# Retrieve optimal point
49+
x_opt, y_opt = bo.getOptimalPoint()
50+
print()

src/hiopbbpy/opt/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
11
from .boalgorithm import (BOAlgorithmBase, BOAlgorithm)
from .acquisition import (acquisition, LCBacquisition, EIacquisition)

# Public API of the opt subpackage.
# NOTE: each entry must end with a comma — without them Python implicitly
# concatenates adjacent string literals into one bogus name
# ("BOAlgorithmBase" "BOAlgorithm" ... became a single string).
__all__ = [
    "BOAlgorithmBase",
    "BOAlgorithm",
    "acquisition",
    "LCBacquisition",
    "EIacquisition",
]

src/hiopbbpy/opt/acquisition.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
"""
77

88
import numpy as np
9+
from scipy.stats import norm
910
from ..surrogate_modeling.gp import GaussianProcess
1011

1112
# A base class for acquisition functions
@@ -30,3 +31,28 @@ def evaluate(self, x : np.ndarray) -> np.ndarray:
3031
mu = self.gpsurrogate.mean(x)
3132
sig = self.gpsurrogate.variance(x)
3233
return mu - self.beta * np.sqrt(sig)
34+
35+
# A subclass of acquisition, implementing the Expected Improvement (EI) acquisition function.
class EIacquisition(acquisition):
    """Negative Expected Improvement acquisition.

    EI(x) = (y_min - mu(x)) * Phi(z) + std(x) * phi(z), z = (y_min - mu(x)) / std(x).
    The value is negated because the optimizer *minimizes* the acquisition
    function while EI itself should be maximized.
    """

    def __init__(self, gpsurrogate):
        super().__init__(gpsurrogate)

    # Method to evaluate the acquisition function at x.
    def evaluate(self, x : np.ndarray) -> np.ndarray:
        # Best (smallest) objective value observed so far; use a scalar,
        # not the full row, so broadcasting below stays well-defined.
        y_data = self.gpsurrogate.training_y
        y_min = np.min(y_data[:, 0])

        pred = self.gpsurrogate.mean(x)
        sig = self.gpsurrogate.variance(x)

        if sig.size > 1:
            # Original built the exception without raising it, silently
            # returning an empty list instead of failing.
            raise NotImplementedError("TODO --- Not implemented yet!")

        # Degenerate predictive distribution: no expected improvement.
        if np.abs(sig) <= 1e-12:
            return 0.0

        # EI is defined with the predictive standard deviation, not the
        # variance; variance() returns the variance (LCB takes np.sqrt(sig)
        # for the same reason).
        std = np.sqrt(sig)
        arg0 = (y_min - pred) / std
        ei = (y_min - pred) * norm.cdf(arg0) + std * norm.pdf(arg0)
        # Negate: we are minimizing the acquisition function.
        return -ei

src/hiopbbpy/opt/boalgorithm.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from numpy.random import uniform
1010
from scipy.optimize import minimize
1111
from ..surrogate_modeling.gp import GaussianProcess
12-
from .acquisition import LCBacquisition
12+
from .acquisition import LCBacquisition, EIacquisition
1313
from ..problems.problem import Problem
1414

1515
# A base class defining a general framework for Bayesian Optimization
@@ -59,7 +59,7 @@ class BOAlgorithm(BOAlgorithmBase):
5959
def __init__(self, gpsurrogate, xtrain, ytrain, acquisition_type = "LCB"):
6060
super().__init__()
6161
assert isinstance(gpsurrogate, GaussianProcess)
62-
assert acquisition_type in ["LCB"]
62+
assert acquisition_type in ["LCB", "EI"]
6363
self.setTrainingData(xtrain, ytrain)
6464
self.setAcquisitionType(acquisition_type)
6565
self.gpsurrogate = gpsurrogate
@@ -84,6 +84,8 @@ def _find_best_point(self, x_train, y_train, x0 = None):
8484

8585
if self.acquisition_type == "LCB":
8686
acqf = LCBacquisition(self.gpsurrogate)
87+
elif self.acquisition_type == "EI":
88+
acqf = EIacquisition(self.gpsurrogate)
8789
else:
8890
raise NotImplementedError("No implemented acquisition_type associated to"+self.acquisition_type)
8991

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
"""
2+
Implementation of the Branin problem
3+
4+
min f(x_1, x_2) = \left( x_2 - \frac{5.1}{4\pi^2} x_1^2 + \frac{5}{\pi} x_1 - 6 \right)^2 + 10 \left(1 - \frac{1}{8\pi}\right) \cos(x_1) + 10.
5+
s.t x_1 \in [-5, 10], \quad x_2 \in [0, 15].
6+
7+
It has three global minima at:
8+
(x_1, x_2) = (-\pi, 12.275)
9+
(\pi, 2.275)
10+
(9.42478, 2.475)
11+
and the optimal objective 0.3979
12+
13+
Authors: Tucker Hartland <hartland1@llnl.gov>
14+
Nai-Yuan Chiang <chiang7@llnl.gov>
15+
"""
16+
import numpy as np
17+
from .problem import Problem
18+
from numpy.random import uniform
19+
20+
# define the Branin problem class
21+
class BraninProblem(Problem):
22+
def __init__(self):
23+
ndim = 2
24+
xlimits = np.array([[-5.0, 10], [0.0, 15]])
25+
name = 'Branin'
26+
super().__init__(ndim, xlimits, name=name)
27+
28+
def _evaluate(self, x: np.ndarray) -> np.ndarray:
29+
30+
ne, nx = x.shape
31+
assert nx == self.ndim
32+
33+
y = np.zeros((ne, 1), complex)
34+
b = 5.1 / (4.0 * (np.pi) ** 2)
35+
c = 5.0 / np.pi
36+
r = 6.0
37+
s = 10.0
38+
t = 1.0 / (8.0 * np.pi)
39+
40+
arg1 = (x[:,1] - b * x[:,0]**2 + c * x[:,0] - r)
41+
y[:,0] = arg1**2 + s * (1 - t) * np.cos(x[:,0]) + s
42+
43+
'''
44+
# compute derivatives
45+
dy_dx0 = 2*arg1*(-2*b*x[:,0]+c) - s*(1-t)*np.sin(x[:,0])
46+
dy_dx1 = 2*arg1
47+
48+
dy_dx = np.array([dy_dx0, dy_dx1])
49+
'''
50+
51+
return y
52+
53+
54+
55+
56+

src/hiopbbpy/problems/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
from .problem import Problem
from .BraninProblem import BraninProblem

# Public API of the problems subpackage.
# NOTE: entries need trailing commas — adjacent string literals otherwise
# concatenate into the single bogus name "ProblemBraninProblem".
__all__ = [
    "Problem",
    "BraninProblem",
]
68

src/hiopbbpy/problems/problem.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
"""
77
import numpy as np
88
from numpy.random import uniform
9+
from scipy.stats import qmc
910

1011
# define the general optimization problem class
1112
class Problem:
@@ -17,6 +18,7 @@ def __init__(self, ndim, xlimits, name=None):
1718
self.name = " "
1819
else:
1920
self.name = name
21+
self.sampler = qmc.LatinHypercube(ndim)
2022

2123
def _evaluate(self, x: np.ndarray) -> np.ndarray:
2224
"""
@@ -66,9 +68,16 @@ def sample(self, nsample: int) -> np.ndarray:
6668
ndarray[nsample, nx]
6769
Samples from domain defined by xlimits
6870
"""
69-
xsample = np.zeros((nsample, self.ndim))
70-
for j in range(self.ndim):
71-
xsample[:, j] = uniform(self.xlimits[j][0], self.xlimits[j][1], size=nsample)
71+
72+
# uniform
73+
# xsample = np.zeros((nsample, self.ndim))
74+
# for j in range(self.ndim):
75+
# xsample[:, j] = uniform(self.xlimits[j][0], self.xlimits[j][1], size=nsample)
76+
77+
# from predefined sampler
78+
xsample = self.sampler.random(nsample)
79+
xsample = self.xlimits[:,0] + (self.xlimits[:,1] - self.xlimits[:,0]) * xsample
80+
7281
return xsample
7382

7483

src/hiopbbpy/surrogate_modeling/gp.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,9 @@ class GaussianProcess:
1212
def __init__(self, ndim, xlimits=None):
1313
self.ndim = ndim
1414
self.xlimits = xlimits
15+
self.training_x = []
16+
self.training_y = []
17+
self.trained = False
1518

1619
# Abstract method for computing the mean of the GP at a given input x
1720
def mean(self, x: np.ndarray) -> np.ndarray:
@@ -72,3 +75,15 @@ def get_bounds(self):
7275
else:
7376
return [(self.xlimits[i][0], self.xlimits[i][1]) for i in range(self.ndim)]
7477

78+
# Abstract method for training the GP
def train(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """
    train the GP model

    Parameters
    ---------
    x : ndarray[n, nx]
    y : ndarray[n, 1]

    Raises
    ------
    NotImplementedError
        Always: child classes of GaussianProcess must override this method.
    """
    # The original constructed the exception without `raise`, so calling
    # train() on the base class silently returned None instead of failing.
    raise NotImplementedError("Child class of GaussianProcess should implement method train")

0 commit comments

Comments
 (0)