Skip to content

Commit 78539d4

Browse files
committed
test CI
1 parent eebe855 commit 78539d4

File tree

8 files changed

+117
-31
lines changed

8 files changed

+117
-31
lines changed
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
# CI workflow for HiOpBBPy: install the package and run the
# Bayesian-optimization driver as a smoke test on every push.
name: test_hiopbbpy

on: [push]

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4 # Set up Python environment
        with:
          python-version: '3.13'

      - name: Install HiOpBBPy and its dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .

      - name: Run Tests
        run: |
          python src/Drivers/hiopbbpy/BODriver.py
        continue-on-error: false

src/Drivers/hiopbbpy/BODriver.py

Lines changed: 70 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
3) determine the minimizer via BOAlgorithm
88
"""
99

10+
import sys
1011
import numpy as np
1112
import matplotlib.pyplot as plt
1213
import warnings
@@ -17,34 +18,75 @@
1718
from hiopbbpy.problems import BraninProblem
1819

1920

# Number of BO repetitions comes from the command line; default to a
# single run when no (or too many) arguments are given.
if len(sys.argv) != 2:
    num_repeat = 1
else:
    num_repeat = int(sys.argv[1])

### parameters
n_samples = 5   # number of the initial samples to train GP
theta = 1.e-2   # hyperparameter for GP kernel
nx = 2          # dimension of the problem
xlimits = np.array([[-5, 5], [-5, 5]])  # bounds on optimization variable

### saved solutions (reference mean optimal objectives used as a regression check)
saved_sol = {"LpNorm": {"LCB": 0.04618462, "EI": 0.44954611}, "Branin": {"LCB": 0.62655919, "EI": 1.9838798}}

prob_type_l = ["LpNorm", "Branin"]
acq_type_l = ["LCB", "EI"]

# mean_obj[problem][acquisition] accumulates optimal objectives over repeats.
mean_obj = {}

retval = 0
for prob_type in prob_type_l:
    print()
    if prob_type == "LpNorm":
        problem = LpNormProblem(nx, xlimits)
    else:
        problem = BraninProblem()

    if prob_type not in mean_obj:
        mean_obj[prob_type] = {}

    for acq_type in acq_type_l:
        if acq_type not in mean_obj[prob_type]:
            mean_obj[prob_type][acq_type] = 0

        print("Problem name: ", problem.name)
        print("Acquisition type: ", acq_type)

        for n_repeat in range(num_repeat):
            print("Run: ", n_repeat, "/", num_repeat)
            ### initial training set
            x_train = problem.sample(n_samples)
            y_train = problem.evaluate(x_train)

            ### Define the GP surrogate model
            gp_model = smtKRG(theta, xlimits, nx)
            gp_model.train(x_train, y_train)

            # Instantiate and run Bayesian Optimization
            bo = BOAlgorithm(gp_model, x_train, y_train, acquisition_type = acq_type) #EI or LCB
            bo.optimize(problem)

            # Retrieve optimal objective
            y_opt = bo.getOptimalObjective()

            mean_obj[prob_type][acq_type] += y_opt

# Average over repeats and compare against the recorded solutions; a
# relative error above 0.5 marks the CI run as failed via the exit code.
for prob_type in prob_type_l:
    for acq_type in acq_type_l:
        mean_obj[prob_type][acq_type] /= num_repeat
        print("Mean Opt.Obj for ", prob_type, "-", acq_type, mean_obj[prob_type][acq_type])

        r_error = np.abs((mean_obj[prob_type][acq_type] - saved_sol[prob_type][acq_type])/saved_sol[prob_type][acq_type])
        if r_error > 0.5:
            print("Relative Error > 0.5: ", r_error)
            print("Recorded Solution:", saved_sol[prob_type][acq_type])
            retval = 1

sys.exit(retval)

src/hiopbbpy/opt/boalgorithm.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import numpy as np
99
from numpy.random import uniform
1010
from scipy.optimize import minimize
11+
from scipy.stats import qmc
1112
from ..surrogate_modeling.gp import GaussianProcess
1213
from .acquisition import LCBacquisition, EIacquisition
1314
from ..problems.problem import Problem
@@ -18,6 +19,7 @@ def __init__(self):
1819
self.acquisition_type = "LCB" # Type of acquisition function (default = "LCB")
1920
self.xtrain = None # Training data
2021
self.ytrain = None # Training data
22+
self.prob = None # Problem structure
2123
self.n_iter = 20 # Maximum number of optimization steps
2224
self.n_start = 10 # estimating acquisition global optima by determining local optima n_start times and then determining the discrete max of that set
2325
self.q = 1 # batch size
@@ -48,11 +50,15 @@ def getOptimizationHistory(self):
4850
y_hist = np.array(self.y_hist, copy=True)
4951
return x_hist, y_hist
5052

# Method to return the optimal solution
def getOptimalPoint(self):
    """Return a defensive copy of the best point found so far."""
    return np.array(self.x_opt, copy=True)

# Method to return the optimal objective
def getOptimalObjective(self):
    """Return a defensive copy of the best objective value found so far."""
    return np.array(self.y_opt, copy=True)
5662

5763
# A subclass of BOAlgorithmBase implementing a full Bayesian Optimization workflow
5864
class BOAlgorithm(BOAlgorithmBase):
@@ -96,8 +102,11 @@ def _find_best_point(self, x_train, y_train, x0 = None):
96102
for ii in range(self.n_start):
97103
success = False
98104
# Generate random starting point if x0 is not provided
99-
if x0 is None:
105+
if x0 is None and self.prob is not None:
106+
x0 = self.prob.sample(1)
107+
else:
100108
x0 = np.array([uniform(b[0], b[1]) for b in self.bounds])
109+
101110
xopt, yout, success = self.acqf_minimizer_callback(acqf_callback, x0)
102111

103112
if success:
@@ -118,6 +127,7 @@ def set_options(self, options):
118127

119128
# Method to perform Bayesian optimization
120129
def optimize(self, prob:Problem):
130+
self.problem = prob
121131
x_train = self.xtrain
122132
y_train = self.ytrain
123133

@@ -162,6 +172,8 @@ def optimize(self, prob:Problem):
162172
print(f"Optimal at BO iteration: {self.idx_opt-n_init_sample+1} ")
163173

164174
print(f"Optimal point: {self.x_opt.flatten()}, Optimal value: {self.y_opt}")
175+
print()
176+
165177

166178

167179
# Find the minimum of the input objective `fun`, using the minimize function from SciPy.

0 commit comments

Comments
 (0)