diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b71b9d70d..99fe21f8a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -26,7 +26,6 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - sudo apt-get install pandoc pip install --upgrade pip pip install .[cpu,format-and-lint] - name: Apply linter @@ -111,7 +110,6 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - sudo apt-get install pandoc pip install --upgrade pip pip install .[cpu,doc] - name: Build the HTML docs diff --git a/.github/workflows/doc-publish.yaml b/.github/workflows/doc-publish.yaml index 31e66fa2f..817fd99b2 100644 --- a/.github/workflows/doc-publish.yaml +++ b/.github/workflows/doc-publish.yaml @@ -26,7 +26,6 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - sudo apt-get install pandoc pip install --upgrade pip pip install .[cpu,doc] - name: Build the HTML docs diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d08b2a8fc..0ae13baa6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,7 +8,7 @@ repos: - id: end-of-file-fixer - id: check-merge-conflict - repo: https://github.com/lyz-code/yamlfix/ - rev: 1.17.0 + rev: 1.18.0 hooks: - id: yamlfix - repo: https://github.com/astral-sh/ruff-pre-commit diff --git a/docs/benchmarks/hires/run_hires.py b/docs/benchmarks/hires/run_hires.py index 48fa74bef..f9b2dce39 100644 --- a/docs/benchmarks/hires/run_hires.py +++ b/docs/benchmarks/hires/run_hires.py @@ -89,7 +89,7 @@ def param_to_solution(tol): # Build a solver vf_auto = functools.partial(vf_probdiffeq, t=t0) tcoeffs = taylor.odejet_padded_scan(vf_auto, (u0,), num=num_derivatives) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="dense") + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact="dense") ts1 = ivpsolvers.correction_ts1(ssm=ssm) strategy = 
ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_dynamic(strategy, prior=ibm, correction=ts1, ssm=ssm) @@ -98,9 +98,6 @@ def param_to_solution(tol): solver, atol=1e-2 * tol, rtol=tol, control=control, ssm=ssm, clip_dt=True ) - # Initial state - init = solver.initial_condition() - # Solve dt0 = ivpsolve.dt0(vf_auto, (u0,)) solution = ivpsolve.solve_adaptive_terminal_values( diff --git a/docs/benchmarks/lotkavolterra/run_lotkavolterra.py b/docs/benchmarks/lotkavolterra/run_lotkavolterra.py index 197ff26db..36fa4654b 100644 --- a/docs/benchmarks/lotkavolterra/run_lotkavolterra.py +++ b/docs/benchmarks/lotkavolterra/run_lotkavolterra.py @@ -80,7 +80,9 @@ def param_to_solution(tol): vf_auto = functools.partial(vf_probdiffeq, t=t0) tcoeffs = taylor.odejet_padded_scan(vf_auto, (u0,), num=num_derivatives) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=implementation) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, ssm_fact=implementation + ) strategy = ivpsolvers.strategy_filter(ssm=ssm) corr = correction(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=corr, ssm=ssm) @@ -89,9 +91,6 @@ def param_to_solution(tol): solver, atol=1e-2 * tol, rtol=tol, control=control, ssm=ssm ) - # Initial state - init = solver.initial_condition() - # Solve dt0 = ivpsolve.dt0(vf_auto, (u0,)) solution = ivpsolve.solve_adaptive_terminal_values( diff --git a/docs/benchmarks/pleiades/run_pleiades.py b/docs/benchmarks/pleiades/run_pleiades.py index d71bbe8d6..ac5f4f7cb 100644 --- a/docs/benchmarks/pleiades/run_pleiades.py +++ b/docs/benchmarks/pleiades/run_pleiades.py @@ -99,7 +99,9 @@ def param_to_solution(tol): vf_auto = functools.partial(vf_probdiffeq, t=t0) tcoeffs = taylor.odejet_padded_scan(vf_auto, (u0, du0), num=num_derivatives - 1) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="isotropic") + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, ssm_fact="isotropic" + ) ts0_or_ts1 = correction_fun(ssm=ssm, 
ode_order=2) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_dynamic( @@ -110,9 +112,6 @@ def param_to_solution(tol): solver, atol=1e-3 * tol, rtol=tol, control=control, ssm=ssm ) - # Initial state - init = solver.initial_condition() - # Solve dt0 = ivpsolve.dt0(vf_auto, (u0, du0)) solution = ivpsolve.solve_adaptive_terminal_values( diff --git a/docs/benchmarks/vanderpol/run_vanderpol.py b/docs/benchmarks/vanderpol/run_vanderpol.py index 0fd3e4a6b..9e4b18b59 100644 --- a/docs/benchmarks/vanderpol/run_vanderpol.py +++ b/docs/benchmarks/vanderpol/run_vanderpol.py @@ -81,7 +81,7 @@ def param_to_solution(tol): vf_auto = functools.partial(vf_probdiffeq, t=t0) tcoeffs = taylor.odejet_padded_scan(vf_auto, (u0, du0), num=num_derivatives - 1) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="dense") + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact="dense") ts0_or_ts1 = ivpsolvers.correction_ts1(ode_order=2, ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) @@ -93,9 +93,6 @@ def param_to_solution(tol): solver, atol=1e-3 * tol, rtol=tol, control=control, ssm=ssm, clip_dt=True ) - # Initial state - init = solver.initial_condition() - # Solve dt0 = ivpsolve.dt0(vf_auto, (u0, du0)) solution = ivpsolve.solve_adaptive_terminal_values( diff --git a/docs/examples_advanced/equinox_while_loop.py b/docs/examples_advanced/equinox_while_loop.py index 15a43576a..2d07178df 100644 --- a/docs/examples_advanced/equinox_while_loop.py +++ b/docs/examples_advanced/equinox_while_loop.py @@ -61,13 +61,12 @@ def vf(y, *, t): # noqa: ARG001 u0 = jnp.asarray([0.1]) tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=1) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="isotropic") + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact="isotropic") ts0 = ivpsolvers.correction_ts0(ode_order=1, ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, 
correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, ssm=ssm) - init = solver.initial_condition() def simulate(init_val): """Evaluate the parameter-to-solution function.""" diff --git a/docs/examples_advanced/neural_ode.py b/docs/examples_advanced/neural_ode.py index 0cf5480b1..94db36b04 100644 --- a/docs/examples_advanced/neural_ode.py +++ b/docs/examples_advanced/neural_ode.py @@ -149,7 +149,7 @@ def loss( """Loss function: log-marginal likelihood of the data.""" # Build a solver tcoeffs = (*u0, vf(*u0, t=t0, p=p)) - ibm, ssm = ivpsolvers.prior_ibm( + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( tcoeffs, output_scale=output_scale, ssm_fact="isotropic" ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) @@ -157,7 +157,6 @@ def loss( solver_ts0 = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) # Solve - init = solver_ts0.initial_condition() sol = ivpsolve.solve_fixed_grid( lambda *a, **kw: vf(*a, **kw, p=p), init, diff --git a/docs/examples_advanced/parameter_estimation_blackjax.py b/docs/examples_advanced/parameter_estimation_blackjax.py index 52203aa82..afecaf9b8 100644 --- a/docs/examples_advanced/parameter_estimation_blackjax.py +++ b/docs/examples_advanced/parameter_estimation_blackjax.py @@ -183,13 +183,12 @@ def solve_fixed(theta, *, ts): # Create a probabilistic solver tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (theta,), num=2) output_scale = 10.0 - ibm, ssm = ivpsolvers.prior_ibm( + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( tcoeffs, output_scale=output_scale, ssm_fact="isotropic" ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - init = solver.initial_condition() return ivpsolve.solve_fixed_grid(vf, init, grid=ts, solver=solver, ssm=ssm) @@ -199,15 +198,13 @@ def solve_adaptive(theta, *, save_at): # Create a probabilistic solver tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, 
t=t0), (theta,), num=2) output_scale = 10.0 - ibm, ssm = ivpsolvers.prior_ibm( + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( tcoeffs, output_scale=output_scale, ssm_fact="isotropic" ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_adaptive_save_at( vf, init, save_at=save_at, adaptive_solver=adaptive_solver, dt0=0.1, ssm=ssm ) diff --git a/docs/examples_advanced/parameter_estimation_optax.py b/docs/examples_advanced/parameter_estimation_optax.py index 13a334f52..a742cacde 100644 --- a/docs/examples_advanced/parameter_estimation_optax.py +++ b/docs/examples_advanced/parameter_estimation_optax.py @@ -59,14 +59,12 @@ def solve(p): """Evaluate the parameter-to-solution map.""" tcoeffs = (u0, vf(u0, t0, p=p)) output_scale = 10.0 - ibm, ssm = ivpsolvers.prior_ibm( + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( tcoeffs, output_scale=output_scale, ssm_fact="isotropic" ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_fixed_grid( lambda y, t: vf(y, t, p=p), init, grid=ts, solver=solver, ssm=ssm ) diff --git a/docs/examples_basic/conditioning-on-zero-residual.py b/docs/examples_basic/conditioning_on_zero_residual.py similarity index 96% rename from docs/examples_basic/conditioning-on-zero-residual.py rename to docs/examples_basic/conditioning_on_zero_residual.py index c639731e1..939aec4f8 100644 --- a/docs/examples_basic/conditioning-on-zero-residual.py +++ b/docs/examples_basic/conditioning_on_zero_residual.py @@ -55,7 +55,7 @@ def vector_field(y, t): # noqa: ARG001 NUM_DERIVATIVES = 2 tcoeffs_like = [u0] * (NUM_DERIVATIVES + 1) ts = jnp.linspace(t0, t1, 
num=500, endpoint=True) -(init_raw, transitions), ssm = ivpsolvers.prior_ibm_discrete( +init_raw, transitions, ssm = ivpsolvers.prior_wiener_integrated_discrete( ts, tcoeffs_like=tcoeffs_like, output_scale=100.0, ssm_fact="dense" ) @@ -71,19 +71,18 @@ def vector_field(y, t): # noqa: ARG001 # + # Compute the posterior -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, output_scale=1.0, ssm_fact="dense") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, output_scale=1.0, ssm_fact="dense" +) ts1 = ivpsolvers.correction_ts1(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts1, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-1, rtol=1e-2, ssm=ssm) dt0 = ivpsolve.dt0(lambda y: vector_field(y, t=t0), (u0,)) - -init = solver.initial_condition() sol = ivpsolve.solve_adaptive_save_at( vector_field, init, save_at=ts, dt0=1.0, adaptive_solver=adaptive_solver, ssm=ssm ) -# posterior = stats.calibrate(sol.posterior, sol.output_scale) markov_seq_posterior = stats.markov_select_terminal(sol.posterior) # + diff --git a/docs/examples_basic/dynamic_output_scales.py b/docs/examples_basic/dynamic_output_scales.py index cb3abf356..f4a1192c2 100644 --- a/docs/examples_basic/dynamic_output_scales.py +++ b/docs/examples_basic/dynamic_output_scales.py @@ -64,7 +64,9 @@ def vf(*ys, t): # noqa: ARG001 num_derivatives = 1 tcoeffs = (u0, vf(u0, t=t0)) -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, output_scale=1.0, ssm_fact="dense") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, output_scale=1.0, ssm_fact="dense" +) ts1 = ivpsolvers.correction_ts1(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) dynamic = ivpsolvers.solver_dynamic(strategy, prior=ibm, correction=ts1, ssm=ssm) @@ -77,12 +79,8 @@ def vf(*ys, t): # noqa: ARG001 ts = jnp.linspace(t0, t1, num=num_pts, endpoint=True) -init_mle = mle.initial_condition() -init_dynamic = dynamic.initial_condition() -solution_dynamic = 
ivpsolve.solve_fixed_grid( - vf, init_mle, grid=ts, solver=dynamic, ssm=ssm -) -solution_mle = ivpsolve.solve_fixed_grid(vf, init_dynamic, grid=ts, solver=mle, ssm=ssm) +solution_dynamic = ivpsolve.solve_fixed_grid(vf, init, grid=ts, solver=dynamic, ssm=ssm) +solution_mle = ivpsolve.solve_fixed_grid(vf, init, grid=ts, solver=mle, ssm=ssm) # - # Plot the solution. diff --git a/docs/examples_basic/posterior_uncertainties.py b/docs/examples_basic/posterior_uncertainties.py index 96c594756..3cc77e329 100644 --- a/docs/examples_basic/posterior_uncertainties.py +++ b/docs/examples_basic/posterior_uncertainties.py @@ -42,7 +42,7 @@ def vf(y, *, t): # noqa: ARG001 # Set up a solver # To all users: Try replacing the fixedpoint-smoother with a filter! tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=3) -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="dense") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact="dense") ts = ivpsolvers.correction_ts1(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts, ssm=ssm) @@ -50,7 +50,6 @@ def vf(y, *, t): # noqa: ARG001 # Solve the ODE ts = jnp.linspace(t0, t1, endpoint=True, num=50) -init = solver.initial_condition() sol = ivpsolve.solve_adaptive_save_at( vf, init, save_at=ts, dt0=0.1, adaptive_solver=adaptive_solver, ssm=ssm ) diff --git a/docs/examples_basic/second_order_problems.py b/docs/examples_basic/second_order_problems.py index bff53dba7..fda834099 100644 --- a/docs/examples_basic/second_order_problems.py +++ b/docs/examples_basic/second_order_problems.py @@ -44,7 +44,9 @@ def vf_1(y, t): # noqa: ARG001 tcoeffs = taylor.odejet_padded_scan(lambda y: vf_1(y, t=t0), (u0,), num=4) -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, output_scale=1.0, ssm_fact="isotropic") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, output_scale=1.0, ssm_fact="isotropic" +) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy 
= ivpsolvers.strategy_filter(ssm=ssm) solver_1st = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) @@ -53,7 +55,6 @@ def vf_1(y, t): # noqa: ARG001 # - -init = solver_1st.initial_condition() solution = ivpsolve.solve_adaptive_save_every_step( vf_1, init, t0=t0, t1=t1, dt0=0.1, adaptive_solver=adaptive_solver_1st, ssm=ssm ) @@ -78,14 +79,14 @@ def vf_2(y, dy, t): # noqa: ARG001 # One derivative more than above because we don't transform to first order tcoeffs = taylor.odejet_padded_scan(lambda *ys: vf_2(*ys, t=t0), (u0, du0), num=3) -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, output_scale=1.0, ssm_fact="isotropic") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, output_scale=1.0, ssm_fact="isotropic" +) ts0 = ivpsolvers.correction_ts0(ode_order=2, ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver_2nd = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver_2nd = ivpsolvers.adaptive(solver_2nd, atol=1e-5, rtol=1e-5, ssm=ssm) - -init = solver_2nd.initial_condition() # - solution = ivpsolve.solve_adaptive_save_every_step( diff --git a/docs/examples_basic/taylor_coefficients.py b/docs/examples_basic/taylor_coefficients.py index 80803f227..05211b986 100644 --- a/docs/examples_basic/taylor_coefficients.py +++ b/docs/examples_basic/taylor_coefficients.py @@ -57,12 +57,10 @@ def vf(*y, t): # noqa: ARG001 # + def solve(tc): """Solve the ODE.""" - prior, ssm = ivpsolvers.prior_ibm(tc, ssm_fact="dense") + init, prior, ssm = ivpsolvers.prior_wiener_integrated(tc, ssm_fact="dense") ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=prior, correction=ts0, ssm=ssm) - init = solver.initial_condition() - ts = jnp.linspace(t0, t1, endpoint=True, num=10) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) return ivpsolve.solve_adaptive_save_at( diff --git a/docs/examples_quickstart/quickstart.py 
b/docs/examples_quickstart/quickstart.py index 5120fe7a0..8fd4c01a8 100644 --- a/docs/examples_quickstart/quickstart.py +++ b/docs/examples_quickstart/quickstart.py @@ -39,7 +39,7 @@ def vf(y, *, t): # noqa: ARG001 # Set up a state-space model tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=1) -ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact="dense") +init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact="dense") # Build a solver @@ -51,7 +51,6 @@ def vf(y, *, t): # noqa: ARG001 # Solve the ODE # To all users: Try different solution routines. -init = solver.initial_condition() solution = ivpsolve.solve_adaptive_save_every_step( vf, init, t0=t0, t1=t1, dt0=0.1, adaptive_solver=adaptive_solver, ssm=ssm ) diff --git a/mkdocs.yml b/mkdocs.yml index eccc2709a..0e44fa1f1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -79,7 +79,7 @@ nav: - Choosing a solver: choosing_a_solver.md - Troubleshooting: troubleshooting.md - EXAMPLES | BASIC: - - examples_basic/conditioning-on-zero-residual.ipynb + - examples_basic/conditioning_on_zero_residual.ipynb - examples_basic/posterior_uncertainties.ipynb - examples_basic/dynamic_output_scales.ipynb - examples_basic/second_order_problems.ipynb diff --git a/probdiffeq/backend/linalg.py b/probdiffeq/backend/linalg.py index 2879e5848..21c3fa100 100644 --- a/probdiffeq/backend/linalg.py +++ b/probdiffeq/backend/linalg.py @@ -39,13 +39,6 @@ def qr_r_jvp(primals, tangents): # All Cholesky factors are lower-triangular by default -def cholesky_factor(arr, /): - return jnp.linalg.cholesky(arr) - - -# All Cholesky factors are lower-triangular by default - - def cholesky_solve(arr, rhs, /): return jax.scipy.linalg.cho_solve((arr, True), rhs) diff --git a/probdiffeq/backend/numpy.py b/probdiffeq/backend/numpy.py index 670a57e27..ffc8ca364 100644 --- a/probdiffeq/backend/numpy.py +++ b/probdiffeq/backend/numpy.py @@ -60,10 +60,6 @@ def squeeze(arr, /): return jnp.squeeze(arr) -def squeeze_along_axis(arr, /, *, 
axis): - return jnp.squeeze(arr, axis=axis) - - def atleast_1d(arr, /): return jnp.atleast_1d(arr) diff --git a/probdiffeq/backend/typing.py b/probdiffeq/backend/typing.py index c02827689..5ea25c2b3 100644 --- a/probdiffeq/backend/typing.py +++ b/probdiffeq/backend/typing.py @@ -1,7 +1,7 @@ """Typing module.""" from collections.abc import Callable, Sequence -from typing import Any, Generic, Optional, TypeAlias, TypeVar +from typing import Any, Generic, TypeAlias, TypeVar import jax from mypy_extensions import NamedArg diff --git a/probdiffeq/impl/_prototypes.py b/probdiffeq/impl/_prototypes.py index a1c289ad2..c86290e3e 100644 --- a/probdiffeq/impl/_prototypes.py +++ b/probdiffeq/impl/_prototypes.py @@ -26,18 +26,18 @@ def __init__(self, ode_shape): self.ode_shape = ode_shape def qoi(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) def observed(self): - mean = np.empty(self.ode_shape) - cholesky = np.empty(self.ode_shape + self.ode_shape) + mean = np.ones(self.ode_shape) + cholesky = np.ones(self.ode_shape + self.ode_shape) return _normal.Normal(mean, cholesky) def error_estimate(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) def output_scale(self): - return np.empty(()) + return np.ones(()) class IsotropicPrototype(PrototypeBackend): @@ -45,18 +45,18 @@ def __init__(self, ode_shape): self.ode_shape = ode_shape def qoi(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) def observed(self): - mean = np.empty((1, *self.ode_shape)) - cholesky = np.empty(()) + mean = np.ones((1, *self.ode_shape)) + cholesky = np.ones(()) return _normal.Normal(mean, cholesky) def error_estimate(self): - return np.empty(()) + return np.ones(()) def output_scale(self): - return np.empty(()) + return np.ones(()) class BlockDiagPrototype(PrototypeBackend): @@ -64,15 +64,15 @@ def __init__(self, ode_shape): self.ode_shape = ode_shape def qoi(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) def 
observed(self): - mean = np.empty((*self.ode_shape, 1)) - cholesky = np.empty((*self.ode_shape, 1, 1)) + mean = np.ones((*self.ode_shape, 1)) + cholesky = np.ones((*self.ode_shape, 1, 1)) return _normal.Normal(mean, cholesky) def error_estimate(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) def output_scale(self): - return np.empty(self.ode_shape) + return np.ones(self.ode_shape) diff --git a/probdiffeq/impl/impl.py b/probdiffeq/impl/impl.py index 57a54cf59..368c4c171 100644 --- a/probdiffeq/impl/impl.py +++ b/probdiffeq/impl/impl.py @@ -15,6 +15,7 @@ class FactImpl: stats: _stats.StatsBackend linearise: _linearise.LinearisationBackend conditional: _conditional.ConditionalBackend + num_derivatives: int # To assert a valid tree_equal of solutions, the factorisations # must be comparable. @@ -67,6 +68,7 @@ def _select_dense(*, tcoeffs_like) -> FactImpl: normal=normal, prototypes=prototypes, stats=stats, + num_derivatives=len(tcoeffs_like) - 1, ) @@ -92,6 +94,7 @@ def _select_isotropic(*, tcoeffs_like) -> FactImpl: stats=stats, linearise=linearise, conditional=conditional, + num_derivatives=len(tcoeffs_like) - 1, ) @@ -117,4 +120,5 @@ def _select_blockdiag(*, tcoeffs_like) -> FactImpl: stats=stats, linearise=linearise, conditional=conditional, + num_derivatives=len(tcoeffs_like) - 1, ) diff --git a/probdiffeq/ivpsolve.py b/probdiffeq/ivpsolve.py index 72dfa2e49..e0123164c 100644 --- a/probdiffeq/ivpsolve.py +++ b/probdiffeq/ivpsolve.py @@ -85,13 +85,13 @@ def _sol_unflatten(aux, children): def solve_adaptive_terminal_values( - vector_field, initial_condition, t0, t1, adaptive_solver, dt0, *, ssm + vector_field, ssm_init, t0, t1, adaptive_solver, dt0, *, ssm ) -> IVPSolution: """Simulate the terminal values of an initial value problem.""" save_at = np.asarray([t0, t1]) solution = solve_adaptive_save_at( vector_field, - initial_condition, + ssm_init, save_at=save_at, adaptive_solver=adaptive_solver, dt0=dt0, @@ -102,7 +102,7 @@ def 
solve_adaptive_terminal_values( def solve_adaptive_save_at( - vector_field, initial_condition, save_at, adaptive_solver, dt0, *, ssm, warn=True + vector_field, ssm_init, save_at, adaptive_solver, dt0, *, ssm, warn=True ) -> IVPSolution: r"""Solve an initial value problem and return the solution at a pre-determined grid. @@ -140,7 +140,7 @@ def solve_adaptive_save_at( (_t, solution_save_at), _, num_steps = _solve_adaptive_save_at( tree_util.Partial(vector_field), save_at[0], - initial_condition, + ssm_init, save_at=save_at[1:], adaptive_solver=adaptive_solver, dt0=dt0, @@ -148,11 +148,8 @@ def solve_adaptive_save_at( # I think the user expects the initial condition to be part of the state # (as well as marginals), so we compute those things here - posterior_t0 = initial_condition.posterior posterior_save_at, output_scale = solution_save_at - _tmp = _userfriendly_output( - posterior=posterior_save_at, posterior_t0=posterior_t0, ssm=ssm - ) + _tmp = _userfriendly_output(posterior=posterior_save_at, ssm_init=ssm_init, ssm=ssm) marginals, posterior = _tmp u = ssm.stats.qoi_from_sample(marginals.mean) std = ssm.stats.standard_deviation(marginals) @@ -170,7 +167,7 @@ def solve_adaptive_save_at( def _solve_adaptive_save_at( - vector_field, t, initial_condition, *, save_at, adaptive_solver, dt0 + vector_field, t, ssm_init, *, save_at, adaptive_solver, dt0 ): def advance(state, t_next): # Advance until accepted.t >= t_next. @@ -200,13 +197,13 @@ def body_fun(s): ) return state, solution - state = adaptive_solver.init(t, initial_condition, dt=dt0, num_steps=0.0) + state = adaptive_solver.init(t, ssm_init, dt=dt0, num_steps=0.0) _, solution = control_flow.scan(advance, init=state, xs=save_at, reverse=False) return solution def solve_adaptive_save_every_step( - vector_field, initial_condition, t0, t1, adaptive_solver, dt0, *, ssm + vector_field, ssm_init, t0, t1, adaptive_solver, dt0, *, ssm ) -> IVPSolution: """Solve an initial value problem and save every step. 
@@ -225,7 +222,7 @@ def solve_adaptive_save_every_step( generator = _solution_generator( tree_util.Partial(vector_field), t0, - initial_condition, + ssm_init, t1=t1, adaptive_solver=adaptive_solver, dt0=dt0, @@ -238,9 +235,8 @@ def solve_adaptive_save_every_step( t = np.concatenate((np.atleast_1d(t0), t)) # I think the user expects marginals, so we compute them here - posterior_t0 = initial_condition.posterior posterior, output_scale = solution_every_step - _tmp = _userfriendly_output(posterior=posterior, posterior_t0=posterior_t0, ssm=ssm) + _tmp = _userfriendly_output(posterior=posterior, ssm_init=ssm_init, ssm=ssm) marginals, posterior = _tmp u = ssm.stats.qoi_from_sample(marginals.mean) @@ -258,10 +254,8 @@ def solve_adaptive_save_every_step( ) -def _solution_generator( - vector_field, t, initial_condition, *, dt0, t1, adaptive_solver -): - state = adaptive_solver.init(t, initial_condition, dt=dt0, num_steps=0) +def _solution_generator(vector_field, t, ssm_init, *, dt0, t1, adaptive_solver): + state = adaptive_solver.init(t, ssm_init, dt=dt0, num_steps=0) while state.step_from.t < t1: state = adaptive_solver.rejection_loop(state, vector_field=vector_field, t1=t1) @@ -280,9 +274,7 @@ def _solution_generator( yield solution -def solve_fixed_grid( - vector_field, initial_condition, grid, solver, *, ssm -) -> IVPSolution: +def solve_fixed_grid(vector_field, ssm_init, grid, solver, *, ssm) -> IVPSolution: """Solve an initial value problem on a fixed, pre-determined grid.""" # Compute the solution @@ -291,13 +283,12 @@ def body_fn(s, dt): return s_new, s_new t0 = grid[0] - state0 = solver.init(t0, initial_condition) + state0 = solver.init(t0, ssm_init) _, result_state = control_flow.scan(body_fn, init=state0, xs=np.diff(grid)) _t, (posterior, output_scale) = solver.extract(result_state) # I think the user expects marginals, so we compute them here - posterior_t0 = initial_condition.posterior - _tmp = _userfriendly_output(posterior=posterior, 
posterior_t0=posterior_t0, ssm=ssm) + _tmp = _userfriendly_output(posterior=posterior, ssm_init=ssm_init, ssm=ssm) marginals, posterior = _tmp u = ssm.stats.qoi_from_sample(marginals.mean) @@ -315,7 +306,7 @@ def body_fn(s, dt): ) -def _userfriendly_output(*, posterior, posterior_t0, ssm): +def _userfriendly_output(*, posterior, ssm_init, ssm): if isinstance(posterior, stats.MarkovSeq): # Compute marginals posterior_no_filter_marginals = stats.markov_select_terminal(posterior) @@ -328,11 +319,10 @@ def _userfriendly_output(*, posterior, posterior_t0, ssm): marginals = tree_array_util.tree_append(marginals, marginal_t1) # Prepend the marginal at t1 to the inits - init_t0 = posterior_t0.init - init = tree_array_util.tree_prepend(init_t0, posterior.init) + init = tree_array_util.tree_prepend(ssm_init, posterior.init) posterior = stats.MarkovSeq(init=init, conditional=posterior.conditional) else: - posterior = tree_array_util.tree_prepend(posterior_t0, posterior) + posterior = tree_array_util.tree_prepend(ssm_init, posterior) marginals = posterior return marginals, posterior diff --git a/probdiffeq/ivpsolvers.py b/probdiffeq/ivpsolvers.py index 8cc6e231f..127e8d56e 100644 --- a/probdiffeq/ivpsolvers.py +++ b/probdiffeq/ivpsolvers.py @@ -1,6 +1,6 @@ """Probabilistic IVP solvers.""" -from probdiffeq import ivpsolve, stats +from probdiffeq import stats from probdiffeq.backend import ( containers, control_flow, @@ -26,30 +26,30 @@ def num_derivatives(self): return len(self.tcoeffs) - 1 -def prior_ibm(tcoeffs, *, ssm_fact: str, output_scale=None): +def prior_wiener_integrated(tcoeffs, *, ssm_fact: str, output_scale=None): """Construct an adaptive(/continuous-time), multiply-integrated Wiener process.""" ssm = impl.choose(ssm_fact, tcoeffs_like=tcoeffs) if output_scale is None: output_scale = np.ones_like(ssm.prototypes.output_scale()) discretize = ssm.conditional.ibm_transitions(output_scale=output_scale) + init = ssm.normal.from_tcoeffs(tcoeffs) + return init, discretize, 
ssm - output_scale_calib = np.ones_like(ssm.prototypes.output_scale()) - prior = _MarkovProcess(tcoeffs, output_scale_calib, discretize=discretize) - return prior, ssm - -def prior_ibm_discrete(ts, *, tcoeffs_like, ssm_fact: str, output_scale=None): +def prior_wiener_integrated_discrete( + ts, *, tcoeffs_like, ssm_fact: str, output_scale=None +): """Compute a time-discretized, multiply-integrated Wiener process.""" - prior, ssm = prior_ibm(tcoeffs_like, output_scale=output_scale, ssm_fact=ssm_fact) - transitions, (p, p_inv) = functools.vmap(prior.discretize)(np.diff(ts)) + init, discretize, ssm = prior_wiener_integrated( + tcoeffs_like, output_scale=output_scale, ssm_fact=ssm_fact + ) + transitions, (p, p_inv) = functools.vmap(discretize)(np.diff(ts)) preconditioner_apply_vmap = functools.vmap(ssm.conditional.preconditioner_apply) conditionals = preconditioner_apply_vmap(transitions, p, p_inv) - output_scale = np.ones_like(ssm.prototypes.output_scale()) - init = ssm.normal.standard(len(tcoeffs_like), output_scale=output_scale) - return stats.MarkovSeq(init, conditionals), ssm + return init, conditionals, ssm @containers.dataclass @@ -188,10 +188,6 @@ class _Strategy: is_suitable_for_save_every_step: int is_suitable_for_offgrid_marginals: int - def initial_condition(self, *, prior): - """Compute an initial condition from a set of Taylor coefficients.""" - raise NotImplementedError - def init(self, sol: stats.MarkovSeq, /): """Initialise a state from a solution.""" raise NotImplementedError @@ -222,13 +218,15 @@ def strategy_smoother(*, ssm) -> _Strategy: @containers.dataclass class Smoother(_Strategy): - def initial_condition(self, *, prior): - rv = self.ssm.normal.from_tcoeffs(prior.tcoeffs) - cond = self.ssm.conditional.identity(len(prior.tcoeffs)) - return stats.MarkovSeq(init=rv, conditional=cond) - - def init(self, sol: stats.MarkovSeq, /): - return sol.init, sol.conditional + def init(self, sol, /): + # Special case for implementing offgrid-marginals... 
+        if isinstance(sol, stats.MarkovSeq): +            rv = sol.init +            cond = sol.conditional +        else: +            rv = sol +            cond = self.ssm.conditional.identity(self.ssm.num_derivatives + 1) +        return rv, cond def begin(self, rv, _extra, /, *, prior_discretized): cond, (p, p_inv) = prior_discretized @@ -274,12 +272,12 @@ def interpolate(self, state_t0, marginal_t1, *, dt0, dt1, output_scale, prior): """ # Extrapolate from t0 to t, and from t to t1. # This yields all building blocks. -        prior0 = prior.discretize(dt0) +        prior0 = prior(dt0) extrapolated_t = self._extrapolate( *state_t0, output_scale=output_scale, prior_discretized=prior0 ) -        prior1 = prior.discretize(dt1) +        prior1 = prior(dt1) extrapolated_t1 = self._extrapolate( *extrapolated_t, output_scale=output_scale, prior_discretized=prior1 ) @@ -324,9 +322,6 @@ class Filter(_Strategy): def init(self, sol, /): return sol, None -    def initial_condition(self, *, prior): -        return self.ssm.normal.from_tcoeffs(prior.tcoeffs) - def begin(self, rv, _extra, /, prior_discretized): cond, (p, p_inv) = prior_discretized @@ -360,7 +355,7 @@ def interpolate(self, state_t0, marginal_t1, dt0, dt1, output_scale, *, prior): del dt1 hidden, extra = state_t0 -        prior0 = prior.discretize(dt0) +        prior0 = prior(dt0) hidden, extra = self.begin(hidden, extra, prior_discretized=prior0) hidden, extra = self.complete(hidden, extra, output_scale=output_scale) @@ -389,13 +384,9 @@ def strategy_fixedpoint(*, ssm) -> _Strategy: @containers.dataclass class FixedPoint(_Strategy): -    def initial_condition(self, prior): -        rv = self.ssm.normal.from_tcoeffs(prior.tcoeffs) -        cond = self.ssm.conditional.identity(len(prior.tcoeffs)) -        return stats.MarkovSeq(init=rv, conditional=cond) - -    def init(self, sol: stats.MarkovSeq, /): -        return sol.init, sol.conditional +    def init(self, sol, /): +        cond = self.ssm.conditional.identity(self.ssm.num_derivatives + 1) +        return sol, cond def begin(self, rv, extra, /, prior_discretized): cond, (p, p_inv) = prior_discretized @@ -429,7 +420,8 @@ def 
complete(self, _rv, extra, /, output_scale): return extrapolated, cond def interpolate_at_t1(self, rv, extra, /, *, prior): -        cond_identity = self.ssm.conditional.identity(prior.num_derivatives + 1) +        del prior +        cond_identity = self.ssm.conditional.identity(self.ssm.num_derivatives + 1) return _InterpRes((rv, cond_identity), (rv, extra), (rv, cond_identity)) def interpolate(self, state_t0, marginal_t1, *, dt0, dt1, output_scale, prior): @@ -474,14 +466,14 @@ def interpolate(self, state_t0, marginal_t1, *, dt0, dt1, output_scale, prior): """ # Extrapolate from t0 to t, and from t to t1. # This yields all building blocks. -        prior0 = prior.discretize(dt0) +        prior0 = prior(dt0) extrapolated_t = self._extrapolate( *state_t0, output_scale=output_scale, prior_discretized=prior0 ) -        conditional_id = self.ssm.conditional.identity(prior.num_derivatives + 1) +        conditional_id = self.ssm.conditional.identity(self.ssm.num_derivatives + 1) previous_new = (extrapolated_t[0], conditional_id) -        prior1 = prior.discretize(dt1) +        prior1 = prior(dt1) extrapolated_t1 = self._extrapolate( *previous_new, output_scale=output_scale, prior_discretized=prior1 ) @@ -636,7 +628,6 @@ class _State(containers.NamedTuple): @containers.dataclass class _ProbabilisticSolver: name: str -    requires_rescaling: bool step_implementation: Callable @@ -653,7 +644,6 @@ def offgrid_marginals(self, *, t, marginals_t1, posterior_t0, t0, t1, output_sca dt0 = t - t0 dt1 = t1 - t - rv, extra = self.strategy.init(posterior_t0) rv, corr = self.correction.init(rv) @@ -673,7 +663,7 @@ @property def error_contraction_rate(self): -        return self.prior.num_derivatives + 1 +        return self.ssm.num_derivatives + 1 @property def is_suitable_for_offgrid_marginals(self): @@ -687,25 +677,11 @@ def is_suitable_for_save_at(self): def is_suitable_for_save_every_step(self): return self.strategy.is_suitable_for_save_every_step -    def initial_condition(self) -> 
ivpsolve.IVPSolution: - """Construct an initial condition.""" - posterior = self.strategy.initial_condition(prior=self.prior) - return ivpsolve.IVPSolution( - t=None, - u=None, - u_std=None, - output_scale=self.prior.output_scale, - marginals=None, - posterior=posterior, - num_steps=None, - ssm=None, - ) - - def init(self, t, initial_condition: ivpsolve.IVPSolution) -> _State: - rv, extra = self.strategy.init(initial_condition.posterior) + def init(self, t, init) -> _State: + rv, extra = self.strategy.init(init) rv, corr = self.correction.init(rv) - calib_state = self.calibration.init(initial_condition.output_scale) + calib_state = self.calibration.init() return _State(t=t, hidden=rv, aux_extra=extra, output_scale=calib_state) def step(self, state: _State, *, vector_field, dt): @@ -782,7 +758,7 @@ def solver_mle(strategy, *, correction, prior, ssm): def step_mle(state, /, *, dt, vector_field, calibration): output_scale_prior, _calibrated = calibration.extract(state.output_scale) - prior_discretized = prior.discretize(dt) + prior_discretized = prior(dt) hidden, extra = strategy.begin( state.hidden, state.aux_extra, prior_discretized=prior_discretized ) @@ -808,7 +784,6 @@ def step_mle(state, /, *, dt, vector_field, calibration): step_implementation=step_mle, strategy=strategy, correction=correction, - requires_rescaling=True, ) @@ -818,7 +793,8 @@ def _calibration_running_mean(*, ssm) -> _Calibration: # marginal likelihoods. # In this case, the _calibration_most_recent() stuff becomes void. 
- def init(prior): + def init(): + prior = ssm.prototypes.output_scale() return prior, prior, 0.0 def update(state, /, observed): @@ -839,7 +815,7 @@ def solver_dynamic(strategy, *, correction, prior, ssm): """Create a solver that calibrates the output scale dynamically.""" def step_dynamic(state, /, *, dt, vector_field, calibration): - prior_discretized = prior.discretize(dt) + prior_discretized = prior(dt) hidden, extra = strategy.begin( state.hidden, state.aux_extra, prior_discretized=prior_discretized ) @@ -866,13 +842,12 @@ def step_dynamic(state, /, *, dt, vector_field, calibration): calibration=_calibration_most_recent(ssm=ssm), name="Dynamic probabilistic solver", step_implementation=step_dynamic, - requires_rescaling=False, ) def _calibration_most_recent(*, ssm) -> _Calibration: - def init(prior): - return prior + def init(): + return ssm.prototypes.output_scale() def update(_state, /, observed): return ssm.stats.mahalanobis_norm_relative(0.0, observed) @@ -889,7 +864,7 @@ def solver(strategy, *, correction, prior, ssm): def step(state: _State, *, vector_field, dt, calibration): del calibration # unused - prior_discretized = prior.discretize(dt) + prior_discretized = prior(dt) hidden, extra = strategy.begin( state.hidden, state.aux_extra, prior_discretized=prior_discretized ) @@ -914,16 +889,15 @@ def step(state: _State, *, vector_field, dt, calibration): prior=prior, strategy=strategy, correction=correction, - calibration=_calibration_none(), + calibration=_calibration_none(ssm=ssm), step_implementation=step, name="Probabilistic solver", - requires_rescaling=False, ) -def _calibration_none() -> _Calibration: - def init(prior): - return prior +def _calibration_none(*, ssm) -> _Calibration: + def init(): + return ssm.prototypes.output_scale() def update(_state, /, observed): raise NotImplementedError diff --git a/tests/test_ivpsolve/test_fixed_grid_vs_save_every_step.py b/tests/test_ivpsolve/test_fixed_grid_vs_save_every_step.py index 6ff982c07..7034af433 
100644 --- a/tests/test_ivpsolve/test_fixed_grid_vs_save_every_step.py +++ b/tests/test_ivpsolve/test_fixed_grid_vs_save_every_step.py @@ -17,14 +17,13 @@ class Taylor(containers.NamedTuple): tcoeffs = Taylor(*taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=2)) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) asolver = ivpsolvers.adaptive(solver, ssm=ssm, atol=1e-2, rtol=1e-2, clip_dt=True) - init = solver.initial_condition() args = (vf, init) adaptive_kwargs = { diff --git a/tests/test_ivpsolve/test_save_at_vs_save_every_step.py b/tests/test_ivpsolve/test_save_at_vs_save_every_step.py index d6ce9b261..235605e7b 100644 --- a/tests/test_ivpsolve/test_save_at_vs_save_every_step.py +++ b/tests/test_ivpsolve/test_save_at_vs_save_every_step.py @@ -12,13 +12,12 @@ def test_save_at_result_matches_interpolated_adaptive_result(fact): # Generate a solver tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - init = solver.initial_condition() problem_args = (vf, init) adaptive_kwargs = {"adaptive_solver": adaptive_solver, "dt0": 0.1, "ssm": ssm} diff --git a/tests/test_ivpsolve/test_save_every_step.py b/tests/test_ivpsolve/test_save_every_step.py index c20eb25ff..65a41bca3 100644 --- a/tests/test_ivpsolve/test_save_every_step.py +++ b/tests/test_ivpsolve/test_save_every_step.py @@ -27,11 +27,11 @@ def python_loop_solution(ivp, *, fact, 
strategy_fun): vf, u0, (t0, t1) = ivp tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, transition, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = strategy_fun(ssm=ssm) - solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) + solver = ivpsolvers.solver_mle(strategy, prior=transition, correction=ts0, ssm=ssm) # clip=False because we need to test adaptive-step-interpolation # for smoothers @@ -42,9 +42,6 @@ def python_loop_solution(ivp, *, fact, strategy_fun): dt0 = ivpsolve.dt0_adaptive( vf, u0, t0=t0, atol=1e-2, rtol=1e-2, error_contraction_rate=5 ) - - init = solver.initial_condition() - args = (vf, init) kwargs = { "t0": t0, diff --git a/tests/test_ivpsolve/test_solution_api.py b/tests/test_ivpsolve/test_solution_api.py index 433e394d5..b24474ab9 100644 --- a/tests/test_ivpsolve/test_solution_api.py +++ b/tests/test_ivpsolve/test_solution_api.py @@ -20,14 +20,12 @@ def fixture_pn_solution(fact): # Generate a solver tcoeffs = Taylor(*taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=2)) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) asolver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_adaptive_save_every_step( vf, init, t0=t0, t1=t1, dt0=0.1, adaptive_solver=asolver, ssm=ssm ) diff --git a/tests/test_ivpsolve/test_terminal_values_vs_save_every_step.py b/tests/test_ivpsolve/test_terminal_values_vs_save_every_step.py index dc27f8e03..d4ac4ebc2 100644 --- a/tests/test_ivpsolve/test_terminal_values_vs_save_every_step.py +++ 
b/tests/test_ivpsolve/test_terminal_values_vs_save_every_step.py @@ -12,15 +12,13 @@ def test_terminal_values_identical(fact): # Generate a solver tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_mle(strategy, prior=ibm, correction=ts0, ssm=ssm) asolver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - init = solver.initial_condition() - args = (vf, init) kwargs = {"t0": t0, "t1": t1, "adaptive_solver": asolver, "dt0": 0.1, "ssm": ssm} diff --git a/tests/test_ivpsolvers/test_calibration_dynamic_vs_mle.py b/tests/test_ivpsolvers/test_calibration_dynamic_vs_mle.py index a74c95379..75cd63b85 100644 --- a/tests/test_ivpsolvers/test_calibration_dynamic_vs_mle.py +++ b/tests/test_ivpsolvers/test_calibration_dynamic_vs_mle.py @@ -14,13 +14,12 @@ def test_exponential_approximated_well(fact): vf, u0, (t0, t1) = ode.ivp_lotka_volterra() - ibm, ssm = ivpsolvers.prior_ibm((*u0, vf(*u0, t=t0)), ssm_fact=fact) + tcoeffs = (*u0, vf(*u0, t=t0)) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver_dynamic(strategy, prior=ibm, correction=ts0, ssm=ssm) - init = solver.initial_condition() - problem_args = (vf, init) grid = np.linspace(t0, t1, num=20) solver_kwargs = {"grid": grid, "solver": solver, "ssm": ssm} diff --git a/tests/test_ivpsolvers/test_calibration_mle_vs_none.py b/tests/test_ivpsolvers/test_calibration_mle_vs_none.py index f7e3f7788..dc8a885e6 100644 --- a/tests/test_ivpsolvers/test_calibration_mle_vs_none.py +++ b/tests/test_ivpsolvers/test_calibration_mle_vs_none.py @@ -16,14 +16,13 @@ def case_solve_fixed_grid(fact): vf, u0, (t0, t1) = 
ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) kwargs = {"grid": np.linspace(t0, t1, endpoint=True, num=5), "ssm": ssm} def solver_to_solution(solver_fun, strategy_fun): strategy = strategy_fun(ssm=ssm) solver = solver_fun(strategy, prior=ibm, correction=ts0, ssm=ssm) - init = solver.initial_condition() return ivpsolve.solve_fixed_grid(vf, init, solver=solver, **kwargs) return solver_to_solution, ssm @@ -37,7 +36,7 @@ def case_solve_adaptive_save_at(fact): dt0 = ivpsolve.dt0(lambda y: vf(y, t=t0), u0) tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) save_at = np.linspace(t0, t1, endpoint=True, num=5) @@ -46,8 +45,6 @@ def case_solve_adaptive_save_at(fact): def solver_to_solution(solver_fun, strategy_fun): strategy = strategy_fun(ssm=ssm) solver = solver_fun(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) return ivpsolve.solve_adaptive_save_at( vf, init, adaptive_solver=adaptive_solver, **kwargs @@ -64,15 +61,13 @@ def case_solve_adaptive_save_every_step(fact): dt0 = ivpsolve.dt0(lambda y: vf(y, t=t0), u0) tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) kwargs = {"t0": t0, "t1": t1, "dt0": dt0, "ssm": ssm} def solver_to_solution(solver_fun, strategy_fun): strategy = strategy_fun(ssm=ssm) solver = solver_fun(strategy, prior=ibm, correction=ts0, 
ssm=ssm) - - init = solver.initial_condition() adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) return ivpsolve.solve_adaptive_save_every_step( vf, init, adaptive_solver=adaptive_solver, **kwargs @@ -88,7 +83,7 @@ def case_simulate_terminal_values(fact): dt0 = ivpsolve.dt0(lambda y: vf(y, t=t0), u0) tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) kwargs = {"t0": t0, "t1": t1, "dt0": dt0, "ssm": ssm} @@ -96,8 +91,6 @@ def case_simulate_terminal_values(fact): def solver_to_solution(solver_fun, strategy_fun): strategy = strategy_fun(ssm=ssm) solver = solver_fun(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() adaptive_solver = ivpsolvers.adaptive(solver, ssm=ssm, atol=1e-2, rtol=1e-2) return ivpsolve.solve_adaptive_terminal_values( vf, init, adaptive_solver=adaptive_solver, **kwargs diff --git a/tests/test_ivpsolvers/test_corrections.py b/tests/test_ivpsolvers/test_corrections.py index 9df2e63a2..68f381c09 100644 --- a/tests/test_ivpsolvers/test_corrections.py +++ b/tests/test_ivpsolvers/test_corrections.py @@ -40,7 +40,7 @@ def fixture_solution(correction_impl, fact): try: tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), u0, num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) corr = correction_impl(ssm=ssm, damp=1e-9) except NotImplementedError: @@ -52,7 +52,6 @@ def fixture_solution(correction_impl, fact): adaptive_kwargs = {"adaptive_solver": adaptive_solver, "dt0": 0.1, "ssm": ssm} - init = solver.initial_condition() return ivpsolve.solve_adaptive_terminal_values( vf, init, t0=t0, t1=t1, **adaptive_kwargs ) diff --git a/tests/test_ivpsolvers/test_strategy_smoother_vs_filter.py 
b/tests/test_ivpsolvers/test_strategy_smoother_vs_filter.py index 3e1bda1c0..0d813f36b 100644 --- a/tests/test_ivpsolvers/test_strategy_smoother_vs_filter.py +++ b/tests/test_ivpsolvers/test_strategy_smoother_vs_filter.py @@ -18,12 +18,12 @@ def fixture_solver_setup(fact): @testing.fixture(name="filter_solution") def fixture_filter_solution(solver_setup): tcoeffs = solver_setup["tcoeffs"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=solver_setup["fact"]) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, ssm_fact=solver_setup["fact"] + ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_fixed_grid( solver_setup["vf"], init, grid=solver_setup["grid"], solver=solver, ssm=ssm ) @@ -32,12 +32,12 @@ def fixture_filter_solution(solver_setup): @testing.fixture(name="smoother_solution") def fixture_smoother_solution(solver_setup): tcoeffs = solver_setup["tcoeffs"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=solver_setup["fact"]) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated( + tcoeffs, ssm_fact=solver_setup["fact"] + ) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_fixed_grid( solver_setup["vf"], init, grid=solver_setup["grid"], solver=solver, ssm=ssm ) diff --git a/tests/test_ivpsolvers/test_strategy_smoother_vs_fixedpoint.py b/tests/test_ivpsolvers/test_strategy_smoother_vs_fixedpoint.py index 3773043d8..b3e1514fc 100644 --- a/tests/test_ivpsolvers/test_strategy_smoother_vs_fixedpoint.py +++ b/tests/test_ivpsolvers/test_strategy_smoother_vs_fixedpoint.py @@ -20,13 +20,11 @@ def fixture_solver_setup(fact): @testing.fixture(name="solution_smoother") def fixture_solution_smoother(solver_setup): 
tcoeffs, fact = solver_setup["tcoeffs"], solver_setup["fact"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-3, rtol=1e-3, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_adaptive_save_every_step( solver_setup["vf"], init, @@ -41,15 +39,13 @@ def fixture_solution_smoother(solver_setup): def test_fixedpoint_smoother_equivalent_same_grid(solver_setup, solution_smoother): """Test that with save_at=smoother_solution.t, the results should be identical.""" tcoeffs, fact = solver_setup["tcoeffs"], solver_setup["fact"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-3, rtol=1e-3, ssm=ssm) save_at = solution_smoother.t - - init = solver.initial_condition() solution_fixedpoint = ivpsolve.solve_adaptive_save_at( solver_setup["vf"], init, @@ -67,7 +63,7 @@ def test_fixedpoint_smoother_equivalent_different_grid(solver_setup, solution_sm # Re-generate the smoothing solver tcoeffs, fact = solver_setup["tcoeffs"], solver_setup["fact"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + _init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver_smoother = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) @@ -80,13 +76,11 @@ def test_fixedpoint_smoother_equivalent_different_grid(solver_setup, solution_sm # Generate a 
fixedpoint solver and solve (saving at the interpolation points) tcoeffs, fact = solver_setup["tcoeffs"], solver_setup["fact"] - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-3, rtol=1e-3, ssm=ssm) - init = solver.initial_condition() - solution_fixedpoint = ivpsolve.solve_adaptive_save_at( solver_setup["vf"], init, diff --git a/tests/test_ivpsolvers/test_strategy_warnings_for_wrong_strategies.py b/tests/test_ivpsolvers/test_strategy_warnings_for_wrong_strategies.py index 06c0d3682..52fd69ad7 100644 --- a/tests/test_ivpsolvers/test_strategy_warnings_for_wrong_strategies.py +++ b/tests/test_ivpsolvers/test_strategy_warnings_for_wrong_strategies.py @@ -10,15 +10,13 @@ def test_warning_for_fixedpoint_in_save_every_step_mode(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - init = solver.initial_condition() - with testing.warns(): _ = ivpsolve.solve_adaptive_save_every_step( vf, init, t0=t0, t1=t1, adaptive_solver=adaptive_solver, dt0=0.1, ssm=ssm @@ -30,14 +28,11 @@ def test_warning_for_smoother_in_save_at_mode(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = 
ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - - init = solver.initial_condition() - with testing.warns(): _ = ivpsolve.solve_adaptive_save_at( vf, diff --git a/tests/test_stats/test_log_marginal_likelihood.py b/tests/test_stats/test_log_marginal_likelihood.py index 32feeaba2..06085c6c4 100644 --- a/tests/test_stats/test_log_marginal_likelihood.py +++ b/tests/test_stats/test_log_marginal_likelihood.py @@ -11,15 +11,12 @@ def fixture_solution(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_fixedpoint(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - - init = solver.initial_condition() - save_at = np.linspace(t0, t1, endpoint=True, num=4) sol = ivpsolve.solve_adaptive_save_at( vf, init, save_at=save_at, adaptive_solver=adaptive_solver, dt0=0.1, ssm=ssm @@ -93,14 +90,13 @@ def test_raises_error_for_filter(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) grid = np.linspace(t0, t1, num=3) - init = solver.initial_condition() sol = ivpsolve.solve_fixed_grid(vf, 
init, grid=grid, solver=solver, ssm=ssm) data = sol.u[0] + 0.1 diff --git a/tests/test_stats/test_log_marginal_likelihood_terminal_values.py b/tests/test_stats/test_log_marginal_likelihood_terminal_values.py index 9a4093aab..4a325fef1 100644 --- a/tests/test_stats/test_log_marginal_likelihood_terminal_values.py +++ b/tests/test_stats/test_log_marginal_likelihood_terminal_values.py @@ -27,13 +27,11 @@ def fixture_solution(strategy_func, fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = strategy_func(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - - init = solver.initial_condition() sol = ivpsolve.solve_adaptive_terminal_values( vf, init, t0=t0, t1=t1, adaptive_solver=adaptive_solver, dt0=0.1, ssm=ssm ) diff --git a/tests/test_stats/test_offgrid_marginals.py b/tests/test_stats/test_offgrid_marginals.py index baf8a8adc..eebb819f3 100644 --- a/tests/test_stats/test_offgrid_marginals.py +++ b/tests/test_stats/test_offgrid_marginals.py @@ -11,12 +11,10 @@ def test_filter_marginals_close_only_to_left_boundary(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = (u0, vf(u0, t=t0)) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_filter(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - - init = solver.initial_condition() grid = np.linspace(t0, t1, endpoint=True, num=5) sol = ivpsolve.solve_fixed_grid(vf, init, grid=grid, solver=solver, ssm=ssm) @@ -35,12 +33,11 @@ def 
test_smoother_marginals_close_to_both_boundaries(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=4) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) - init = solver.initial_condition() grid = np.linspace(t0, t1, endpoint=True, num=5) sol = ivpsolve.solve_fixed_grid(vf, init, grid=grid, solver=solver, ssm=ssm) # Extrapolate from the left: close-to-left boundary must be similar, diff --git a/tests/test_stats/test_sample.py b/tests/test_stats/test_sample.py index 604682048..cafa015b3 100644 --- a/tests/test_stats/test_sample.py +++ b/tests/test_stats/test_sample.py @@ -10,13 +10,11 @@ def fixture_approximation(fact): vf, (u0,), (t0, t1) = ode.ivp_lotka_volterra() tcoeffs = taylor.odejet_padded_scan(lambda y: vf(y, t=t0), (u0,), num=2) - ibm, ssm = ivpsolvers.prior_ibm(tcoeffs, ssm_fact=fact) + init, ibm, ssm = ivpsolvers.prior_wiener_integrated(tcoeffs, ssm_fact=fact) ts0 = ivpsolvers.correction_ts0(ssm=ssm) strategy = ivpsolvers.strategy_smoother(ssm=ssm) solver = ivpsolvers.solver(strategy, prior=ibm, correction=ts0, ssm=ssm) adaptive_solver = ivpsolvers.adaptive(solver, atol=1e-2, rtol=1e-2, ssm=ssm) - - init = solver.initial_condition() return ivpsolve.solve_adaptive_save_every_step( vf, init, t0=t0, t1=t1, adaptive_solver=adaptive_solver, dt0=0.1, ssm=ssm )