Changes from all commits (38 commits)
f7b5a0e
renaming model to specfem_model to make it clear that this is a speci…
bch0w Mar 23, 2025
4f07d2d
small udpates model
bch0w Mar 30, 2025
5119028
fixing up basic architecture of model, single apply now works, workin…
bch0w Mar 30, 2025
440d2ca
updating namespace
bch0w Jun 6, 2025
19d4f78
merging apply with and apply into a single major function, cleaning u…
bch0w Jun 20, 2025
f40a97c
removing dot from apply because dot needs its own function, change ac…
bch0w Jun 20, 2025
a7e1de2
updating codebase to match new model class
bch0w Jun 21, 2025
fbd0b51
small updates
bch0w Jul 16, 2025
1e92681
merging devel
bch0w Aug 13, 2025
ded2939
resolving merge conflicts
bch0w Aug 13, 2025
40fea0f
tests passing
bch0w Aug 13, 2025
9fc85f4
Merge branch 'devel' into feature-optimize_model
bch0w Aug 13, 2025
cf08616
rename optimize function _precondition to precondition
bch0w Aug 13, 2025
9f6540d
updated dot product function for Model and created test for it
bch0w Aug 13, 2025
302cfc9
finished Modle update in Gradient class, moving onto LBFGS
bch0w Aug 14, 2025
7d8106b
updated lbfgs update_search_history to match new model definition
bch0w Aug 14, 2025
ec6fc29
reworking Model.apply() function to be more generalized and less verb…
bch0w Aug 15, 2025
3aa0d65
finish updating LBFGS with new model paradigm
bch0w Aug 16, 2025
647a050
fixing and cleaning up tests, still need to make the checks more robu…
bch0w Aug 16, 2025
b0296aa
adding some magnitude and angle recalculation for model but I think i…
bch0w Aug 16, 2025
2541d9d
POTENTIAL BUGFIX: I think the definition of was incorrect in the ori…
bch0w Aug 16, 2025
efd230e
gradient updated new model paradigm, again, finally
bch0w Aug 19, 2025
6363e73
renamed LBFGS function 'apply_inverse_hessian' to 'apply_inverse_hess…
bch0w Aug 19, 2025
b399f70
Finished updating LBFGS
bch0w Aug 19, 2025
e8b6afb
finished updating NLCG, added some warnings and notimplementederrors …
bch0w Aug 19, 2025
b2671bf
finished updating inversion and migration workflows with new model pa…
bch0w Aug 19, 2025
105f70c
removing poissons ratio check from the model check function, this wil…
bch0w Aug 19, 2025
6cfdfc8
updating how paths are defined in the workflow migration script
bch0w Aug 20, 2025
88d51d4
I have no idea how but the specfem2d example runs through, now time t…
bch0w Aug 20, 2025
04d9c06
cleaning up specfem2d model plotter to be less cumbersome and mostly …
bch0w Aug 20, 2025
1100d22
bugfixing some LBFGS as I run example 2
bch0w Aug 21, 2025
e4acba3
LBFGS now running through iteration 2
bch0w Aug 21, 2025
b738b55
PYAFLOWA bugfix exit workflow when no windows are returned during mis…
bch0w Aug 21, 2025
ea83752
EXAMPLE CHANGE: changing smoothing length from 5km to 20km in both di…
bch0w Aug 21, 2025
d38cdd3
fixing search history update in LBFGS
bch0w Aug 21, 2025
48020c3
LBFGS machinery working over 7 iterations, time for benchmarking
bch0w Aug 21, 2025
457c1dc
fixed the Model.get('mean') function, and fixed tests which were roud…
bch0w Aug 21, 2025
1596f1f
bugfix incorrect model was being passed to the next iteration
bch0w Aug 27, 2025
4 changes: 2 additions & 2 deletions seisflows/examples/ex2_hh_w_pyatoa.py
@@ -83,8 +83,8 @@ def __init__(self, ntask=None, niter=None, nsta=None, nproc=None,
}

# Adjust the existing parameter list
self._parameters["smooth_h"] = 5000.
self._parameters["smooth_v"] = 5000.
self._parameters["smooth_h"] = 20000.
self._parameters["smooth_v"] = 20000.

# Pyaflowa preprocessing parameters
self._parameters["min_period"] = 10. # filter bounds define windows
338 changes: 195 additions & 143 deletions seisflows/optimize/LBFGS.py

Large diffs are not rendered by default.
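Since the LBFGS.py diff is not rendered, the sketch below is an editor's illustration (not part of this PR) of how the refactored Model interface appears to be used, inferred from the NLCG.py changes further down: the apply(actions=..., values=..., export_to=...) pattern, dot(), and get("vector"). All paths, the beta value, and the variable names here are hypothetical placeholders.

# Editor's sketch only; mirrors the Model usage visible in NLCG.py below.
from seisflows.tools.specfem_model import Model

g_new = Model("scratch/optimize/g_new")    # hypothetical path
p_old = Model("scratch/optimize/p_old")    # hypothetical path
beta = 0.5                                 # placeholder scale factor

# p_new = -1 * g_new + beta * p_old, composed as three apply() calls,
# following the pattern used in NLCG.compute_direction()
p_new = g_new.apply(actions=["*"], values=[-1])
p_old_scaled = p_old.apply(actions=["*"], values=[beta],
                           export_to="scratch/optimize/tmp")   # hypothetical
p_new = p_new.apply(actions=["+"], values=[p_old_scaled],
                    export_to="scratch/optimize/p_new")        # hypothetical

# Scalar reductions and array access now live on the Model itself
gg = g_new.dot(g_new)          # inner product with another Model
vec = p_new.get("vector")      # flattened parameter vector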

156 changes: 88 additions & 68 deletions seisflows/optimize/NLCG.py
@@ -7,6 +7,8 @@
from seisflows import logger
from seisflows.optimize.gradient import Gradient
from seisflows.tools.math import dot
from seisflows.tools.specfem_model import Model
from seisflows.tools import msg
from seisflows.plugins import line_search as line_search_dir


@@ -33,11 +35,15 @@ class NLCG(Gradient):
"""
__doc__ = Gradient.__doc__ + __doc__

def __init__(self, nlcg_max=np.inf, nlcg_thresh=np.inf,
def __init__(self, nlcg_max=np.inf, nlcg_thresh=np.inf,
calc_beta="pollak_ribere", **kwargs):
"""NLCG-specific input parameters"""
super().__init__(**kwargs)

# Be honest!
logger.warning(msg.mjr("THIS OPTIMIZATION MODULE IS NOT WELL TESTED, "
"USE AT YOUR OWN RISK"))

# Overwrite user-chosen line search. NLCG requires a 'Bracket'ing LS
if self.line_search_method.title() != "Bracket":
logger.warning(f"NLCG optimization requires 'bracket'ing line "
@@ -53,7 +59,7 @@ def __init__(self, nlcg_max=np.inf, nlcg_thresh=np.inf,
self.NLCG_thresh = nlcg_thresh

# Check parameter validity
_acceptable_calc_beta = ["pollak_ribere", "fletcher_reeves"]
_acceptable_calc_beta = ["fletcher_reeves"] # "pollak_ribere"
assert(calc_beta in _acceptable_calc_beta), (
f"unacceptable `calc_beta`, must be in {_acceptable_calc_beta}"
)
@@ -72,7 +78,7 @@ def checkpoint(self):
checkpoint_dict = dict(np.load(self.path._checkpoint))  # NpzFile is read-only, copy into a dict before editing
checkpoint_dict["NLCG_iter"] = self._NLCG_iter

np.savez(file=self.path._checkpoint, **dict_out) # NOQA
np.savez(file=self.path._checkpoint, **checkpoint_dict) # NOQA

def compute_direction(self):
"""
@@ -97,54 +103,57 @@ def compute_direction(self):
self._NLCG_iter += 1

# Load the current gradient direction
g_new = self.load_vector("g_new")
p_new = g_new.copy()
g_new = Model(self.path._g_new)
g_old = Model(self.path._g_old)
p_old = Model(self.path._p_old)

# CASE 1: If first iteration, search direction is the current gradient
if self._NLCG_iter == 1:
logger.info("first NLCG iteration, setting search direction "
"as inverse gradient")
p_new.update(vector=-1 * g_new.vector)
restarted = False
# p_new = -g
return g_new.apply(actions=["*"], values=[-1],
export_to=self.path._p_new)
# CASE 2: Force restart if the iterations have surpassed the maximum
# number of allowable iter
elif self._NLCG_iter > self.NLCG_max:
logger.info("restarting NLCG due to periodic restart "
"condition. setting search direction as inverse "
"gradient")
logger.info("restarting NLCG due to periodic restart condition. "
"setting search direction as inverse gradient")
self.restart()
p_new.update(vector=-1 * g_new.vector)
restarted = True
# Normal NLCG direction compuitation
# p_new = -g
return g_new.apply(actions=["*"], values=[-1],
export_to=self.path._p_new)
# Normal NLCG direction computation
else:
# Compute search direction
g_old = self.load_vector("g_old")
p_old = self.load_vector("p_old")
beta = self._calc_beta(g_new.vector, g_old.vector)

# Apply preconditioner and calc. scale factor for search dir. (beta)
_p_new_vec = (
-1 * self._precondition(g_new.vector) + beta * p_old.vector
)
p_new.update(vector=_p_new_vec)

# Check restart conditions, return search direction and status
if check_conjugacy(g_new.vector, g_old.vector) > self.NLCG_thresh:
if check_conjugacy(g_new, g_old) > self.NLCG_thresh:
logger.info("restarting NLCG due to loss of conjugacy")
self.restart()
p_new.update(vector=-1 * g_new.vector)
restarted = True
elif check_descent(p_new.vector, g_new.vector) > 0.:
# p_new = -g
return g_new.apply(actions=["*"], values=[-1],
export_to=self.path._p_new)

# Compute beta, the scaling factor for the search direction. User
# has chosen a specific underlying function to do this
beta = self._calc_beta()

# Optional step, apply preconditioner in place
if self.preconditioner:
g_new = self.precondition(q=g_new)

# p_new = -1 * g + B * p_old (couldn't figure out how to do this in
# one action so we apply three)
p_new = g_new.apply(actions=["*"], values=[-1])
p_old = p_old.apply(actions=["*"], values=[beta],
export_to=self.path._scratch)
p_new = p_new.apply(actions=["+"], values=[p_old])

# Check restart conditions, return search direction and status
if check_descent(p_new.get("vector"), g_new.get("vector")) > 0.:
logger.info("restarting NLCG, not a descent direction")
self.restart()
p_new.update(vector=-1 * g_new.vector)
restarted = True
else:
# p_new = p_new
restarted = False

# Save values to disk and memory
self._restarted = restarted
# p_new = -g
return g_new.apply(actions=["*"], values=[-1],
export_to=self.path._p_new)

return p_new

@@ -153,65 +162,76 @@ def restart(self):
Overwrite the Base restart class and include a restart of the NLCG
"""
logger.info("restarting NLCG optimization algorithm")

g_new = self.load_vector("g_new")
p_new = g_new.copy()
p_new.update(vector=-1 * g_new.vector)
self.save_vector("p_new", p_new)

self._line_search.clear_search_history()
self._restarted = 1
self._restarted = True
self._NLCG_iter = 1

def _fletcher_reeves(self, g_new, g_old):
def _fletcher_reeves(self):
"""
One method for calculating beta in the NLCG Algorithm from
Calculating scale factor beta in the NLCG Algorithm following:
Fletcher & Reeves, 1964

:type g_new: np.array
:param g_new: new search direction
:type g_old: np.array
:param g_old: old search direction
:rtype: float
:return: beta, the scale factor to apply to the old search direction to
determine the new search direction
"""
num = dot(self._precondition(g_new), g_new)
den = dot(g_old, g_old)
beta = num / den
return beta
# Use current and old gradients
g_new = Model(self.path._g_new)
g_old = Model(self.path._g_old)

def _pollak_ribere(self, g_new, g_old):
if self.preconditioner:
g_new_prcnd = self.precondition(g_new, export_to=self.path._scratch)
else:
g_new_prcnd = g_new

return g_new_prcnd.dot(g_new) / g_old.dot(g_old)

def _pollak_ribere(self):
"""
One method for calculating beta in the NLCG Algorithm from
Calculating scale factor beta in the NLCG Algorithm following:
Polak & Ribiere, 1969

:type g_new: np.array
:param g_new: new search direction
:type g_old: np.array
:param g_old: old search direction
:rtype: float
:return: beta, the scale factor to apply to the old search direction to
determine the new search direction
"""
num = dot(self._precondition(g_new), g_new - g_old)
den = dot(g_old, g_old)
beta = num / den
return beta
# BC Did not have time to finish updating this, need to figure out how
# to store two scratch variables at the same time. See comment below
raise NotImplementedError("This function is currently not working, if "
"you would like to use it, please open a "
"GitHub issue")

# Use current and old gradients
g_new = Model(self.path._g_new)
g_old = Model(self.path._g_old)
p_old = Model(self.path._p_old)

if self.preconditioner:
g_new_prcnd = self.precondition(g_new, export_to=self.path._scratch)
else:
g_new_prcnd = g_new

# This won't work because both `dg` and `g_new_prcnd` need to be stored
# in scratch. I guess I could make another scratch directory or store it
# somewhere else but I feel like there is a cleaner way
dg = g_new.apply(actions=["-"], values=[g_old],
export_to=self.path._scratch)

return g_new_prcnd.dot(dg) / g_old.dot(g_old)


def check_conjugacy(g_new, g_old):
"""
Check for conjugacy between two vectors

:type g_new: np.array
:type g_new: seisflows.tools.specfem_model.Model
:param g_new: new search direction
:type g_old: np.array
:type g_old: seisflows.tools.specfem_model.Model
:param g_old: old search direction
:rtype: float
:return: a measure of the loss of conjugacy between the two gradients,
compared against the conjugacy threshold to decide whether to restart
"""
return abs(dot(g_new, g_old) / dot(g_new, g_new))
return abs(g_new.dot(g_old) / g_new.dot(g_new))


def check_descent(p_new, g_new):
@@ -226,7 +246,7 @@ def check_descent(p_new, g_new):
:return: the angle between search direction and gradient direction, should
be negative to ensure descent
"""
return dot(p_new, g_new) / dot(g_new, g_new)
return p_new.dot(g_new) / g_new.dot(g_new)
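
For reference, an editor's restatement (not part of this PR) of the scale factor computed by the two _calc_beta variants and of the restart criteria used above, with P denoting the optional preconditioner:

% Fletcher-Reeves and Polak-Ribiere scale factors (P = optional preconditioner)
\beta_{\mathrm{FR}} = \frac{(P\,g_{\mathrm{new}}) \cdot g_{\mathrm{new}}}{g_{\mathrm{old}} \cdot g_{\mathrm{old}}}
\qquad
\beta_{\mathrm{PR}} = \frac{(P\,g_{\mathrm{new}}) \cdot (g_{\mathrm{new}} - g_{\mathrm{old}})}{g_{\mathrm{old}} \cdot g_{\mathrm{old}}}

% A restart is triggered when conjugacy is lost or the direction is not a descent direction
\left|\frac{g_{\mathrm{new}} \cdot g_{\mathrm{old}}}{g_{\mathrm{new}} \cdot g_{\mathrm{new}}}\right| > \texttt{nlcg\_thresh}
\qquad\text{or}\qquad
\frac{p_{\mathrm{new}} \cdot g_{\mathrm{new}}}{g_{\mathrm{new}} \cdot g_{\mathrm{new}}} > 0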


