Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 26 additions & 6 deletions .github/workflows/test_hiopbbpy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,19 +14,39 @@ jobs:
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v4 # Set up Python environment
uses: actions/setup-python@v4
with:
python-version: '3.13'
python-version: "3.13"

- name: Set up Conda
uses: conda-incubator/setup-miniconda@v2
with:
auto-activate-base: true
channels: conda-forge

- name: Install IPOPT via conda
run: |
conda init bash
conda install -c conda-forge cyipopt pkg-config
export LD_LIBRARY_PATH=$CONDA_PREFIX/lib:$LD_LIBRARY_PATH

- name: Install HiOpBBPy and its dependencies
- name: Verify cyipopt installation
run: |
source ~/.bashrc
conda activate base
python -c "import cyipopt; print('cyipopt OK')"

- name: Install HiOpBBPy
run: |
source ~/.bashrc
conda activate base
export SKIP_CYIPOPT=1
python -m pip install --upgrade pip
pip install .

- name: Run Tests
run: |
python src/Drivers/hiopbbpy/BODriver.py 10
source ~/.bashrc
conda activate base
python src/Drivers/hiopbbpy/BODriverCI.py 10
continue-on-error: false


10 changes: 8 additions & 2 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,20 +6,26 @@
'''

import sys
import os
import numpy as np
from setuptools import setup, find_packages


install_requires = ["smt"]

if os.getenv("SKIP_CYIPOPT", "0") != "1":
install_requires.append("cyipopt")

metadata = dict(
name="hiopbbpy",
version="0.0.3",
version="0.0.4",
description="HiOp black box optimization (hiopbbpy)",
author="Tucker hartland et al.",
author_email="hartland1@llnl.gov",
license="BSD-3",
packages=find_packages(where="src"),
package_dir={"": "src"},
install_requires=["smt"],
install_requires=install_requires,
python_requires=">=3.9",
zip_safe=False,
url="https://github.com/LLNL/hiop",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,15 @@
min_obj[prob_type][acq_type] = np.inf
y_opt[prob_type][acq_type] = np.zeros(num_repeat)

options = {
'acquisition_type': acq_type,
'bo_maxiter': 20,
'opt_solver': 'SLSQP', #"SLSQP" "IPOPT"
'solver_options': {
'maxiter': 200
}
}

print("Problem name: ", problem.name)
print("Acquisition type: ", acq_type)

Expand All @@ -84,8 +93,8 @@
gp_model.train(x_train, y_train)

# Instantiate and run Bayesian Optimization
bo = BOAlgorithm(gp_model, x_train, y_train, acquisition_type = acq_type) #EI or LCB
bo.optimize(problem)
bo = BOAlgorithm(problem, gp_model, x_train, y_train, options = options)
bo.optimize()

# Retrieve optimal objective
y_opt[prob_type][acq_type][n_repeat] = bo.getOptimalObjective()
Expand Down
95 changes: 95 additions & 0 deletions src/Drivers/hiopbbpy/BODriverEX.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
"""
Code description:
for a 2D example LpNormProblem
1) randomly sample training points
2) define a Kriging-based Gaussian-process (smt backend)
trained on said data
3) determine the minimizer via BOAlgorithm

Authors: Tucker Hartland <hartland1@llnl.gov>
Nai-Yuan Chiang <chiang7@llnl.gov>
"""

import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from LpNormProblem import LpNormProblem
from hiopbbpy.surrogate_modeling import smtKRG
from hiopbbpy.opt import BOAlgorithm
from hiopbbpy.problems import BraninProblem


# Get user input for the number of repetitions from command-line arguments.
# Falls back to a single repetition when the argument is missing, negative,
# or not a valid integer.  (The original evaluated int(sys.argv[1]) inside
# the condition, so a non-numeric argument crashed with ValueError instead
# of falling back to the default.)
num_repeat = 1
if len(sys.argv) == 2:
    try:
        requested = int(sys.argv[1])
    except ValueError:
        requested = -1  # unparsable input: treat like "no argument given"
    if requested >= 0:
        num_repeat = requested

### parameters
n_samples = 5    # number of the initial samples to train GP
theta = 1.e-2    # hyperparameter for GP kernel
nx = 2           # dimension of the problem
xlimits = np.array([[-5, 5], [-5, 5]])  # bounds on optimization variable

prob_type_l = ["LpNorm"]  # ["LpNorm", "Branin"]
acq_type_l = ["LCB"]      # ["LCB", "EI"]

def con_eq(x):
    """Equality-constraint residual: x0 + x1 - 4 (zero when satisfied)."""
    total = x[0] + x[1]
    return total - 4

def con_jac_eq(x):
    """Jacobian of con_eq: constant [1, 1], independent of x."""
    return np.ones(2)

def con_ineq(x):
    """Inequality-constraint value x0 - x1 (non-negative when satisfied)."""
    first, second = x[0], x[1]
    return first - second

def con_jac_ineq(x):
    """Jacobian of con_ineq: constant [1, -1], independent of x."""
    jac = np.ones(2)
    jac[1] = -1.0
    return jac

# Constraint list in scipy.optimize.minimize dict form ('type'/'fun'/'jac').
# only 'trust-constr' method supports vector-valued constraints
user_constraint = [{'type': 'ineq', 'fun': con_ineq, 'jac': con_jac_ineq},
                   {'type': 'eq', 'fun': con_eq, 'jac': con_jac_eq}]

retval = 0  # process exit code; never changed below, reserved for error signaling
for prob_type in prob_type_l:
    print()
    # Build the benchmark problem; constraints are attached afterwards so the
    # same user_constraint list applies to every problem type.
    if prob_type == "LpNorm":
        problem = LpNormProblem(nx, xlimits, constraints=None)
    else:
        problem = BraninProblem(constraints=None)
    problem.set_constraints(user_constraint)

    for acq_type in acq_type_l:
        print("Problem name: ", problem.name)
        print("Acquisition type: ", acq_type)

        ### initial training set: sample the design space, evaluate objective
        x_train = problem.sample(n_samples)
        y_train = problem.evaluate(x_train)

        ### Define the GP surrogate model (Kriging, smt backend) and train it
        gp_model = smtKRG(theta, xlimits, nx)
        gp_model.train(x_train, y_train)

        # Options forwarded to BOAlgorithm.  'solver_options' presumably map
        # to IPOPT options (max_iter / print_level) — TODO confirm against
        # the IpoptProb backend.
        options = {
            'acquisition_type': acq_type,  # "LCB" or "EI"
            'bo_maxiter': 10,
            'opt_solver': 'IPOPT', #"SLSQP" "IPOPT"
            'solver_options': {
                'max_iter': 200,
                'print_level': 1
            }
        }

        # Instantiate and run Bayesian Optimization
        bo = BOAlgorithm(problem, gp_model, x_train, y_train, options = options)
        bo.optimize()

sys.exit(retval)




4 changes: 2 additions & 2 deletions src/Drivers/hiopbbpy/LpNormProblem.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@
from hiopbbpy.problems.problem import Problem

class LpNormProblem(Problem):
def __init__(self, ndim, xlimits, p=2.0):
def __init__(self, ndim, xlimits, p=2.0, constraints=None):
name = "LpNormProblem"
super().__init__(ndim, xlimits, name=name)
super().__init__(ndim, xlimits, name=name, constraints=constraints)
self.p = p

def _evaluate(self, x):
Expand Down
2 changes: 2 additions & 0 deletions src/hiopbbpy/opt/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
from .boalgorithm import (BOAlgorithmBase, BOAlgorithm)
from .acquisition import (acquisition, LCBacquisition, EIacquisition)
from .optproblem import (IpoptProb)

# Public API of hiopbbpy.opt.
# NOTE: the original list had no commas between the string entries, so
# Python's implicit string-literal concatenation collapsed all six names
# into one bogus string ("BOAlgorithmBaseBOAlgorithm..."), breaking
# `from hiopbbpy.opt import *`.
__all__ = [
    "BOAlgorithmBase",
    "BOAlgorithm",
    "acquisition",
    "LCBacquisition",
    "EIacquisition",
    "IpoptProb",
]
52 changes: 47 additions & 5 deletions src/hiopbbpy/opt/acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,28 +14,41 @@ class acquisition(object):
    def __init__(self, gpsurrogate):
        """Base class for acquisition functions built on a GP surrogate.

        gpsurrogate : GaussianProcess
            Trained surrogate model this acquisition queries.
        """
        # NOTE(review): assert is stripped under -O; consider raising TypeError.
        assert isinstance(gpsurrogate, GaussianProcess)
        self.gpsurrogate = gpsurrogate
        # Subclasses that implement eval_g() flip this to True so the
        # optimizer knows an analytic gradient is available.
        self.has_gradient = False

# Abstract method to evaluate the acquisition function at x.
    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the acquisition function at x.

        Abstract: concrete acquisitions (e.g. LCB, EI) must override this.
        """
        raise NotImplementedError("Child class of acquisition should implement method evaluate")

# Abstract method to evaluate the gradient of acquisition function at x.
def eval_g(self, x: np.ndarray) -> np.ndarray:
raise NotImplementedError("Child class of acquisition should implement method evaluate")

# A subclass of acquisition, implementing the Lower Confidence Bound (LCB)
# acquisition function: LCB(x) = mu(x) - beta * sqrt(var(x)).
class LCBacquisition(acquisition):
    def __init__(self, gpsurrogate, beta=3.0):
        """LCB acquisition.

        Parameters
        ----------
        gpsurrogate : GaussianProcess
            Trained surrogate providing mean/variance and their gradients.
        beta : float
            Exploration weight multiplying the predictive standard deviation.
        """
        super().__init__(gpsurrogate)
        self.beta = beta
        self.has_gradient = True  # analytic gradient implemented in eval_g

    # Method to evaluate the acquisition function at x.
    def evaluate(self, x: np.ndarray) -> np.ndarray:
        mu = self.gpsurrogate.mean(x)
        sig2 = self.gpsurrogate.variance(x)
        return mu - self.beta * np.sqrt(sig2)

    def eval_g(self, x: np.ndarray) -> np.ndarray:
        """Gradient of LCB at x.

        d/dx [mu - beta*sqrt(sig2)] = dmu/dx - 0.5 * beta * dsig2/dx / sqrt(sig2)
        """
        # Removed an unused mean() evaluation present in the original.
        sig2 = self.gpsurrogate.variance(x)
        dsig2_dx = self.gpsurrogate.variance_gradient(x)
        dmu_dx = self.gpsurrogate.mean_gradient(x)
        return dmu_dx - 0.5 * self.beta * dsig2_dx / np.sqrt(sig2)

# A subclass of acquisition, implementing the Expected improvement (EI) acquisition function.
class EIacquisition(acquisition):
def __init__(self, gpsurrogate):
super().__init__(gpsurrogate)
self.has_gradient = True

# Method to evaluate the acquisition function at x.
def evaluate(self, x : np.ndarray) -> np.ndarray:
Expand All @@ -47,12 +60,41 @@ def evaluate(self, x : np.ndarray) -> np.ndarray:

retval = []
if sig.size == 1 and np.abs(sig) > 1e-12:
arg0 = (y_min - pred) / sig
retval = (y_min - pred) * norm.cdf(arg0) + sig * norm.pdf(arg0)
z = (y_min - pred) / sig
retval = (y_min - pred) * norm.cdf(z) + sig * norm.pdf(z)
retval *= -1.
elif sig.size == 1 and np.abs(sig) <= 1e-12:
retval = 0.0
elif sig.size > 1:
NotImplementedError("TODO --- Not implemented yet!")
raise NotImplementedError("TODO --- Not implemented yet!")

return retval

def eval_g(self, x: np.ndarray) -> np.ndarray:
y_data = self.gpsurrogate.training_y
y_min = y_data[np.argmin(y_data[:, 0])]

mean = self.gpsurrogate.mean(x)
sig2 = self.gpsurrogate.variance(x)
sig = np.sqrt(sig2)

grad_EI = None
if sig.size == 1 and np.abs(sig) > 1e-12:
dmean_dx = self.gpsurrogate.mean_gradient(x)
dsig2_dx = self.gpsurrogate.variance_gradient(x)
dsig_dx = 0.5 * dsig2_dx / sig

z = (y_min - mean) / sig
ncdf = norm.cdf(z)
npdf = norm.pdf(z)
EI = (y_min - mean) * ncdf + sig * npdf

dz_dx = -dmean_dx / sig - (y_min - mean) * dsig_dx / sig**2
grad_EI = -dmean_dx * ncdf + dsig_dx * npdf
grad_EI *= -1.
elif sig.size == 1 and np.abs(sig) <= 1e-12:
grad_EI = 0.0
elif sig.size > 1:
raise NotImplementedError("TODO --- Not implemented yet!")

return grad_EI
Loading
Loading