Skip to content

Commit e96a8d7

Browse files
committed
Upgrade pre-commit hook and fix resulting complaints
1 parent 86ab9ca commit e96a8d7

15 files changed

Lines changed: 28 additions & 29 deletions

docs/examples_advanced/neural_ode.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -33,7 +33,7 @@ def main(num_data=100, epochs=500, print_every=50, hidden=(20,), lr=0.2):
3333
data = jnp.sin(2.5 * jnp.pi * grid) * jnp.pi * grid
3434
stdev = 1e-1
3535
output_scale = 1e4
36-
vf, u0, (t0, t1), f_args = vf_neural_ode(hidden=hidden, t0=0.0, t1=1)
36+
vf, u0, (t0, _t1), f_args = vf_neural_ode(hidden=hidden, t0=0.0, t1=1)
3737

3838
# Create a loss (this is where probabilistic numerics enters!)
3939
loss = loss_log_marginal_likelihood(vf=vf, t0=t0)

docs/examples_advanced/solve_pde.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -59,7 +59,7 @@ def vf(y, *, t): # noqa: ARG001
5959
simulate = simulator(save_at=save_at, errorest=errorest, solver=solver)
6060
(u, u_std) = simulate(init)
6161

62-
fig, axes = plt.subplots(
62+
_fig, axes = plt.subplots(
6363
nrows=2, ncols=len(u), figsize=(2 * len(u), 3), tight_layout=True
6464
)
6565
for t_i, u_i, std_i, ax_i in zip(save_at, u, u_std, axes.T):

docs/examples_benchmarks/convergence-rates-lotka-volterra.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -73,7 +73,7 @@ def main():
7373
results[label] = param_to_wp(tolerances)
7474

7575
layout = [["values", "trends"]]
76-
fig, ax = plt.subplot_mosaic(
76+
_fig, ax = plt.subplot_mosaic(
7777
layout,
7878
figsize=(8, 3),
7979
constrained_layout=True,
@@ -209,7 +209,7 @@ def parameter_list_to_workprecision(list_of_args, /):
209209
works_std = []
210210
precisions = []
211211
for arg in list_of_args:
212-
x, num_steps = fun(arg)
212+
_x, num_steps = fun(arg)
213213

214214
precision = precision_fun(fun(arg)[0].block_until_ready())
215215
times = timeit_fun(lambda: fun(arg)[0].block_until_ready()) # noqa: B023

docs/examples_benchmarks/taylor-init-fitzhughnagumo.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -55,7 +55,7 @@ def main(max_time=0.25, repeats=2):
5555
algo, timeit_fun=timeit_fun, max_time=max_time
5656
)
5757

58-
fig, (axis_perform, axis_compile) = plt.subplots(
58+
_fig, (axis_perform, axis_compile) = plt.subplots(
5959
ncols=2, figsize=(8, 3), dpi=150, sharex=True, sharey=True
6060
)
6161

docs/examples_benchmarks/taylor-init-node.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -55,7 +55,7 @@ def main(max_time=0.55, repeats=2):
5555
algo, timeit_fun=timeit_fun, max_time=max_time
5656
)
5757

58-
fig, (axis_perform, axis_compile) = plt.subplots(
58+
_fig, (axis_perform, axis_compile) = plt.subplots(
5959
ncols=2, figsize=(8, 3), dpi=150, sharex=True, sharey=True
6060
)
6161

docs/examples_benchmarks/taylor-init-pleiades.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -54,7 +54,7 @@ def main(max_time=0.5, repeats=2):
5454
algo, timeit_fun=timeit_fun, max_time=max_time
5555
)
5656

57-
fig, (axis_perform, axis_compile) = plt.subplots(
57+
_fig, (axis_perform, axis_compile) = plt.subplots(
5858
ncols=2, figsize=(8, 3), dpi=150, sharex=True, sharey=True
5959
)
6060

docs/examples_benchmarks/work-precision-hires.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@ def main(start=1.0, stop=9.0, step=1.0, repeats=2, use_diffrax: bool = False):
4242
# Simulate once to plot the state
4343
ts, ys = solve_ivp_once()
4444

45-
fig, ax = plt.subplots(figsize=(5, 3))
45+
_fig, ax = plt.subplots(figsize=(5, 3))
4646
ax.plot(ts, ys)
4747
ax.set_title("Hires problem")
4848
ax.set_xlabel("Time")
@@ -80,7 +80,7 @@ def main(start=1.0, stop=9.0, step=1.0, repeats=2, use_diffrax: bool = False):
8080
param_to_wp = workprec(algo, precision_fun=precision_fun, timeit_fun=timeit_fun)
8181
results[label] = param_to_wp(tolerances)
8282

83-
fig, ax = plt.subplots(figsize=(5, 3))
83+
_fig, ax = plt.subplots(figsize=(5, 3))
8484
for label, wp in results.items():
8585
ax.loglog(wp["precision"], wp["work_mean"], label=label)
8686

docs/examples_benchmarks/work-precision-lotka-volterra.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@ def main(start=3.0, stop=12.0, step=1.0, repeats=2, use_diffrax: bool = False):
4242
# Simulate once to plot the state
4343
ts, ys = solve_ivp_once()
4444

45-
fig, ax = plt.subplots(figsize=(5, 3))
45+
_fig, ax = plt.subplots(figsize=(5, 3))
4646
ax.plot(ts, ys)
4747
ax.set_title("Lotka-Volterra problem")
4848
ax.set_xlabel("Time")
@@ -87,7 +87,7 @@ def main(start=3.0, stop=12.0, step=1.0, repeats=2, use_diffrax: bool = False):
8787
param_to_wp = workprec(algo, precision_fun=precision_fun, timeit_fun=timeit_fun)
8888
results[label] = param_to_wp(tolerances)
8989

90-
fig, ax = plt.subplots(figsize=(7, 3))
90+
_fig, ax = plt.subplots(figsize=(7, 3))
9191
for label, wp in results.items():
9292
ax.loglog(wp["precision"], wp["work_mean"], label=label)
9393

docs/examples_benchmarks/work-precision-pleiades.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -41,9 +41,9 @@ def main(start=3.0, stop=11.0, step=1.0, repeats=2, use_diffrax: bool = False):
4141
jax.config.update("jax_enable_x64", True)
4242

4343
# Simulate once to plot the state
44-
ts, ys = solve_ivp_once()
44+
_ts, ys = solve_ivp_once()
4545

46-
fig, ax = plt.subplots(figsize=(5, 3))
46+
_fig, ax = plt.subplots(figsize=(5, 3))
4747
ax.plot(ys[:, :7], ys[:, 7:14], linestyle="solid", marker="None")
4848
ax.plot(ys[0, :7], ys[0, 7:14], linestyle="None", marker=".", markersize=4)
4949
ax.plot(ys[-1, :7], ys[-1, 7:14], linestyle="None", marker="*", markersize=8)
@@ -92,7 +92,7 @@ def main(start=3.0, stop=11.0, step=1.0, repeats=2, use_diffrax: bool = False):
9292
param_to_wp = workprec(algo, precision_fun=precision_fun, timeit_fun=timeit_fun)
9393
results[label] = param_to_wp(tolerances)
9494

95-
fig, ax = plt.subplots(figsize=(7, 3))
95+
_fig, ax = plt.subplots(figsize=(7, 3))
9696
for label, wp in results.items():
9797
ax.loglog(wp["precision"], wp["work_mean"], label=label)
9898

docs/examples_benchmarks/work-precision-vanderpol.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@ def main(start=2.0, stop=8.0, step=1.0, repeats=2, use_diffrax: bool = False):
4242
# Simulate once to plot the state
4343
ts, ys = solve_ivp_once()
4444

45-
fig, ax = plt.subplots(figsize=(5, 3))
45+
_fig, ax = plt.subplots(figsize=(5, 3))
4646
ax.plot(ts, ys)
4747
ax.set_ylim((-6, 6))
4848
ax.set_title("Van-der-Pol problem")
@@ -80,7 +80,7 @@ def main(start=2.0, stop=8.0, step=1.0, repeats=2, use_diffrax: bool = False):
8080
param_to_wp = workprec(algo, precision_fun=precision_fun, timeit_fun=timeit_fun)
8181
results[label] = param_to_wp(tolerances)
8282

83-
fig, ax = plt.subplots(figsize=(7, 3))
83+
_fig, ax = plt.subplots(figsize=(7, 3))
8484
for label, wp in results.items():
8585
ax.loglog(wp["precision"], wp["work_mean"], label=label)
8686

0 commit comments

Comments (0)