Skip to content

Commit f2c38f7

Browse files
nychiang and thartland authored
Add github workflow tests for HiOpBBPy (#716)
* test CI
* remove ipopt as a dependency
* update some parameters
* address Cosmin's comments
* test CI
* address comments
* fix bug, turn on CI
* save y_opt from 1000 runs
* address comments
* move saved data out of the base hiop directory to avoid overcrowding, and obtain the path of the python `__file__` so that the saved data is loaded relative to the python file rather than the current working directory. This enables running both (from the hiop base dir) `python src/Drivers/hiopbbpy/BODriver.py` and (from hiop/src/Drivers/hiopbbpy) `python BODriver.py`. Previously the latter case raised an error.

---------

Co-authored-by: Tucker Hartland <tucker.hartland@gmail.com>
1 parent eebe855 commit f2c38f7

File tree

7 files changed

+156
-35
lines changed

7 files changed

+156
-35
lines changed
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
name: test_hiopbbpy
2+
3+
on: [push]
4+
5+
jobs:
6+
test:
7+
runs-on: ${{ matrix.os }}
8+
strategy:
9+
matrix:
10+
os: [ubuntu-latest]
11+
12+
steps:
13+
- name: Checkout code
14+
uses: actions/checkout@v4
15+
16+
- name: Set up Python
17+
uses: actions/setup-python@v4 # Set up Python environment
18+
with:
19+
python-version: '3.13'
20+
21+
22+
- name: Install HiOpBBPy and its dependencies
23+
run: |
24+
python -m pip install --upgrade pip
25+
pip install .
26+
27+
- name: Run Tests
28+
run: |
29+
python src/Drivers/hiopbbpy/BODriver.py 10
30+
continue-on-error: false
31+
32+

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
[build-system]
2-
requires = ["setuptools", "wheel", "numpy", "smt", "cyipopt"]
2+
requires = ["setuptools", "wheel", "numpy", "smt"]
33
build-backend = "setuptools.build_meta"

setup.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,14 @@
1212

1313
metadata = dict(
1414
name="hiopbbpy",
15-
version="0.0.2",
15+
version="0.0.3",
1616
description="HiOp black box optimization (hiopbbpy)",
1717
author="Tucker hartland et al.",
1818
author_email="hartland1@llnl.gov",
1919
license="BSD-3",
2020
packages=find_packages(where="src"),
2121
package_dir={"": "src"},
22-
install_requires=["smt", "cyipopt"],
22+
install_requires=["smt"],
2323
python_requires=">=3.9",
2424
zip_safe=False,
2525
url="https://github.com/LLNL/hiop",

src/Drivers/hiopbbpy/BODriver.py

Lines changed: 105 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,13 @@
55
2) define a Kriging-based Gaussian-process (smt backend)
66
trained on said data
77
3) determine the minimizer via BOAlgorithm
8+
9+
Authors: Tucker Hartland <hartland1@llnl.gov>
10+
Nai-Yuan Chiang <chiang7@llnl.gov>
811
"""
912

13+
import sys
14+
import os
1015
import numpy as np
1116
import matplotlib.pyplot as plt
1217
import warnings
@@ -17,34 +22,106 @@
1722
from hiopbbpy.problems import BraninProblem
1823

1924

20-
### parameters
21-
n_samples = 5 # number of the initial samples to train GP
22-
theta = 1.e-2 # hyperparameter for GP kernel
25+
# Get user input for the number of repetitions from command-line arguments
26+
if len(sys.argv) != 2 or int(sys.argv[1]) < 0:
27+
num_repeat = 1
28+
else:
29+
num_repeat = int(sys.argv[1])
2330

24-
nx = 2 # dimension of the problem
31+
### parameters
32+
n_samples = 5 # number of the initial samples to train GP
33+
theta = 1.e-2 # hyperparameter for GP kernel
34+
nx = 2 # dimension of the problem
2535
xlimits = np.array([[-5, 5], [-5, 5]]) # bounds on optimization variable
2636

27-
#problem = LpNormProblem(nx, xlimits)
28-
problem = BraninProblem()
29-
30-
print(problem.name, " problem")
31-
32-
### initial training set
33-
x_train = problem.sample(n_samples)
34-
y_train = problem.evaluate(x_train)
35-
36-
# Define the GP surrogate model
37-
gp_model = smtKRG(theta, xlimits, nx)
38-
gp_model.train(x_train, y_train)
39-
40-
acquisition_types = ["LCB", "EI"]
41-
for acquisition_type in acquisition_types:
42-
print("acquisition type: ", acquisition_type)
43-
44-
# Instantiate and run Bayesian Optimization
45-
bo = BOAlgorithm(gp_model, x_train, y_train, acquisition_type = acquisition_type) #EI or LCB
46-
bo.optimize(problem)
47-
48-
# Retrieve optimal point
49-
x_opt, y_opt = bo.getOptimalPoint()
50-
print()
37+
### saved solutions --- from 1000 repetitions
38+
saved_min_obj = {"LpNorm": {"LCB": 0.0007586314501994839, "EI": 0.002094016049616341}, "Branin": {"LCB": 0.3979820338569908, "EI": 0.39789916461969455}}
39+
saved_mean_obj = {"LpNorm": {"LCB": 0.018774638321851504, "EI": 0.11583915178648867}, "Branin": {"LCB": 0.5079001079219421, "EI": 0.4377466109837465}}
40+
saved_max_obj = {"LpNorm": {"LCB": 0.0755173754382861, "EI": 0.4175676394969743}, "Branin": {"LCB": 1.107240543567082, "EI": 0.7522382699410031}}
41+
script_dir = os.path.dirname(__file__)
42+
saved_yopt = np.load(script_dir + "/yopt_20iter_1000run.npy",allow_pickle=True).item()
43+
44+
prob_type_l = ["LpNorm", "Branin"]
45+
acq_type_l = ["LCB", "EI"]
46+
47+
mean_obj = {}
48+
max_obj = {}
49+
min_obj = {}
50+
y_opt = {}
51+
52+
retval = 0
53+
for prob_type in prob_type_l:
54+
print()
55+
if prob_type == "LpNorm":
56+
problem = LpNormProblem(nx, xlimits)
57+
else:
58+
problem = BraninProblem()
59+
60+
if prob_type not in mean_obj:
61+
mean_obj[prob_type] = {}
62+
max_obj[prob_type] = {}
63+
min_obj[prob_type] = {}
64+
y_opt[prob_type] = {}
65+
66+
for acq_type in acq_type_l:
67+
if acq_type not in mean_obj[prob_type]:
68+
mean_obj[prob_type][acq_type] = 0
69+
max_obj[prob_type][acq_type] = -np.inf
70+
min_obj[prob_type][acq_type] = np.inf
71+
y_opt[prob_type][acq_type] = np.zeros(num_repeat)
72+
73+
print("Problem name: ", problem.name)
74+
print("Acquisition type: ", acq_type)
75+
76+
for n_repeat in range(num_repeat):
77+
print("Run: ", n_repeat, "/", num_repeat)
78+
### initial training set
79+
x_train = problem.sample(n_samples)
80+
y_train = problem.evaluate(x_train)
81+
82+
### Define the GP surrogate model
83+
gp_model = smtKRG(theta, xlimits, nx)
84+
gp_model.train(x_train, y_train)
85+
86+
# Instantiate and run Bayesian Optimization
87+
bo = BOAlgorithm(gp_model, x_train, y_train, acquisition_type = acq_type) #EI or LCB
88+
bo.optimize(problem)
89+
90+
# Retrieve optimal objec
91+
y_opt[prob_type][acq_type][n_repeat] = bo.getOptimalObjective()
92+
93+
mean_obj[prob_type][acq_type] += y_opt[prob_type][acq_type][n_repeat]
94+
max_obj[prob_type][acq_type] = max(max_obj[prob_type][acq_type], y_opt[prob_type][acq_type][n_repeat])
95+
min_obj[prob_type][acq_type] = min(min_obj[prob_type][acq_type], y_opt[prob_type][acq_type][n_repeat])
96+
97+
#if(num_repeat >= 1000 ):
98+
# np.save("yopt_20iter_1000run.npy", y_opt)
99+
100+
# Define percentiles
101+
left_percentile = 1 # 5 or 1
102+
right_percentile = 100 - left_percentile # 95 or 99
103+
104+
print("Summary:")
105+
for prob_type in prob_type_l:
106+
for acq_type in acq_type_l:
107+
mean_obj[prob_type][acq_type] /= num_repeat
108+
print("(Min,Mean,Max) Opt.Obj for", prob_type, "-", acq_type, ":\t(", min_obj[prob_type][acq_type], ",",mean_obj[prob_type][acq_type], ",", max_obj[prob_type][acq_type], ")")
109+
110+
### verify the results with the saved results
111+
left_value = np.percentile(saved_yopt[prob_type][acq_type], left_percentile)
112+
right_value = np.percentile(saved_yopt[prob_type][acq_type], right_percentile)
113+
114+
is_failed = (y_opt[prob_type][acq_type] < left_value) | (y_opt[prob_type][acq_type] > right_value)
115+
num_fail = np.sum(is_failed)
116+
117+
# currently ci test is only applied to num_repeat == 10
118+
if num_fail >= 3 and num_repeat == 10:
119+
print(num_fail, "test(s) fail(s):", y_opt[prob_type][acq_type][is_failed])
120+
print("Recorded (min, mean, max): (", saved_min_obj[prob_type][acq_type], ",", saved_mean_obj[prob_type][acq_type], ",", saved_max_obj[prob_type][acq_type], ")")
121+
retval = 1
122+
123+
sys.exit(retval)
124+
125+
126+
127+
31.7 KB
Binary file not shown.

src/hiopbbpy/opt/acquisition.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def evaluate(self, x : np.ndarray) -> np.ndarray:
4343
y_min = y_data[np.argmin(y_data[:, 0])]
4444

4545
pred = self.gpsurrogate.mean(x)
46-
sig = self.gpsurrogate.variance(x)
46+
sig = np.sqrt(self.gpsurrogate.variance(x))
4747

4848
retval = []
4949
if sig.size == 1 and np.abs(sig) > 1e-12:

src/hiopbbpy/opt/boalgorithm.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import numpy as np
99
from numpy.random import uniform
1010
from scipy.optimize import minimize
11+
from scipy.stats import qmc
1112
from ..surrogate_modeling.gp import GaussianProcess
1213
from .acquisition import LCBacquisition, EIacquisition
1314
from ..problems.problem import Problem
@@ -18,6 +19,7 @@ def __init__(self):
1819
self.acquisition_type = "LCB" # Type of acquisition function (default = "LCB")
1920
self.xtrain = None # Training data
2021
self.ytrain = None # Training data
22+
self.prob = None # Problem structure
2123
self.n_iter = 20 # Maximum number of optimization steps
2224
self.n_start = 10 # estimating acquisition global optima by determining local optima n_start times and then determining the discrete max of that set
2325
self.q = 1 # batch size
@@ -48,11 +50,15 @@ def getOptimizationHistory(self):
4850
y_hist = np.array(self.y_hist, copy=True)
4951
return x_hist, y_hist
5052

51-
# Method to return the optimal solution and objective
53+
# Method to return the optimal solution
5254
def getOptimalPoint(self):
5355
x_opt = np.array(self.x_opt, copy=True)
56+
return x_opt
57+
58+
# Method to return the optimal objective
59+
def getOptimalObjective(self):
5460
y_opt = np.array(self.y_opt, copy=True)
55-
return x_opt, y_opt
61+
return y_opt
5662

5763
# A subclass of BOAlgorithmBase implementing a full Bayesian Optimization workflow
5864
class BOAlgorithm(BOAlgorithmBase):
@@ -96,8 +102,11 @@ def _find_best_point(self, x_train, y_train, x0 = None):
96102
for ii in range(self.n_start):
97103
success = False
98104
# Generate random starting point if x0 is not provided
99-
if x0 is None:
105+
if x0 is None and self.prob is not None:
106+
x0 = self.prob.sample(1)
107+
else:
100108
x0 = np.array([uniform(b[0], b[1]) for b in self.bounds])
109+
101110
xopt, yout, success = self.acqf_minimizer_callback(acqf_callback, x0)
102111

103112
if success:
@@ -118,6 +127,7 @@ def set_options(self, options):
118127

119128
# Method to perform Bayesian optimization
120129
def optimize(self, prob:Problem):
130+
self.problem = prob
121131
x_train = self.xtrain
122132
y_train = self.ytrain
123133

@@ -162,6 +172,8 @@ def optimize(self, prob:Problem):
162172
print(f"Optimal at BO iteration: {self.idx_opt-n_init_sample+1} ")
163173

164174
print(f"Optimal point: {self.x_opt.flatten()}, Optimal value: {self.y_opt}")
175+
print()
176+
165177

166178

167179
# Find the minimum of the input objective `fun`, using the minimize function from SciPy.

0 commit comments

Comments (0)