Changes from 24 commits

26 commits
c65a2ec: Removing timedelta64 from advection and diffusion kernels (erikvansebille, Nov 21, 2025)
a0ba1bb: Using time as float in Parcels internals (erikvansebille, Nov 24, 2025)
1b9d9e9: Fix last unit test (erikvansebille, Nov 25, 2025)
7a77802: merge commit (erikvansebille, Nov 25, 2025)
137c858: Fixing unit tests (erikvansebille, Nov 25, 2025)
669a1b9: Fix advection kernels to use time as float (erikvansebille, Nov 25, 2025)
18a4590: Adding next_dt variable to make RK45 work (erikvansebille, Nov 25, 2025)
fc8f1e7: Using timedelta_to_float function throughout (erikvansebille, Nov 26, 2025)
69aa16b: Updating progress bar time description (erikvansebille, Nov 26, 2025)
8480d1b: Updating argo tutorial to use float dt (erikvansebille, Nov 26, 2025)
8c042d3: FIxing float_to_datetime without time_interval (erikvansebille, Nov 26, 2025)
a114379: Merge branch 'v4-dev' into time_dt_as_float (erikvansebille, Nov 27, 2025)
d33b680: Fixing kernelloop tutorial kernel to not use dt=timedelta (erikvansebille, Nov 27, 2025)
c27a1e0: Merge branch 'v4-dev' into time_dt_as_float (erikvansebille, Dec 1, 2025)
da35f25: Cleaning up pset.execute time as floats (erikvansebille, Dec 1, 2025)
b988f9c: Fixing unit tests (erikvansebille, Dec 2, 2025)
44278c8: Adding _warn_outputdt_release_desync call in pset.execute() (erikvansebille, Dec 2, 2025)
4b59f90: Merge branch 'v4-dev' into time_dt_as_float (erikvansebille, Dec 2, 2025)
83315d4: Fixing diffusion tutorial (erikvansebille, Dec 2, 2025)
b25a052: Removing dt conversion from diffusion notebook (erikvansebille, Dec 2, 2025)
a1e46d6: Fixing docstring typo (erikvansebille, Dec 2, 2025)
3d21467: Remove _SAME_AS_FIELDSET_TIME_INTERVAL (erikvansebille, Dec 2, 2025)
5bcf378: Also supporting outputdt as float (erikvansebille, Dec 3, 2025)
e479387: Refactor AdvectionRK45 kernel selection into dedicated util (VeckoTheGecko, Dec 3, 2025)
63896ac: Implementing review suggestion (erikvansebille, Dec 3, 2025)
88a2c96: Update v4-migration.md (erikvansebille, Dec 4, 2025)
5 changes: 2 additions & 3 deletions docs/user_guide/examples/explanation_kernelloop.md
@@ -78,12 +78,11 @@ Now we define a wind kernel that uses a forward Euler method to apply the wind f
 
 ```{code-cell}
 def wind_kernel(particles, fieldset):
-    dt_float = particles.dt / np.timedelta64(1, 's')
     particles.dlon += (
-        fieldset.UWind[particles] * dt_float
+        fieldset.UWind[particles] * particles.dt
     )
     particles.dlat += (
-        fieldset.VWind[particles] * dt_float
+        fieldset.VWind[particles] * particles.dt
     )
 ```
 
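With this change, kernels receive `particles.dt` directly as a float number of seconds, so the manual `np.timedelta64` conversion inside the kernel disappears. A minimal sketch of the unit handling before and after this PR (the values are illustrative, not taken from the tutorial):

```python
import numpy as np

# Before: dt was a timedelta64 and had to be converted to float seconds in-kernel
dt_old = np.timedelta64(60, "s")
dt_float = dt_old / np.timedelta64(1, "s")  # 60.0

# After: dt is already a plain float in seconds
dt_new = 60.0

u_wind = 0.5  # example wind speed in m/s
assert u_wind * dt_float == u_wind * dt_new == 30.0  # same displacement either way
```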
29 changes: 7 additions & 22 deletions docs/user_guide/examples/tutorial_Argofloats.ipynb
@@ -30,54 +30,44 @@
 "driftdepth = 1000 # maximum depth in m\n",
 "maxdepth = 2000 # maximum depth in m\n",
 "vertical_speed = 0.10 # sink and rise speed in m/s\n",
-"cycletime = (\n",
-"    10 * 86400\n",
-") # total time of cycle in seconds # TODO update to \"timedelta64[s]\"\n",
+"cycletime = 10 * 86400 # total time of cycle in seconds\n",
 "drifttime = 9 * 86400 # time of deep drift in seconds\n",
 "\n",
 "\n",
 "def ArgoPhase1(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\") # convert dt to seconds\n",
-"\n",
 "    def SinkingPhase(p):\n",
 "        \"\"\"Phase 0: Sinking with vertical_speed until depth is driftdepth\"\"\"\n",
-"        p.dz += vertical_speed * dt\n",
+"        p.dz += vertical_speed * particles.dt\n",
 "        p.cycle_phase = np.where(p.z + p.dz >= driftdepth, 1, p.cycle_phase)\n",
 "        p.dz = np.where(p.z + p.dz >= driftdepth, driftdepth - p.z, p.dz)\n",
 "\n",
 "    SinkingPhase(particles[particles.cycle_phase == 0])\n",
 "\n",
 "\n",
 "def ArgoPhase2(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\") # convert dt to seconds\n",
-"\n",
 "    def DriftingPhase(p):\n",
 "        \"\"\"Phase 1: Drifting at depth for drifttime seconds\"\"\"\n",
-"        p.drift_age += dt\n",
+"        p.drift_age += particles.dt\n",
 "        p.cycle_phase = np.where(p.drift_age >= drifttime, 2, p.cycle_phase)\n",
 "        p.drift_age = np.where(p.drift_age >= drifttime, 0, p.drift_age)\n",
 "\n",
 "    DriftingPhase(particles[particles.cycle_phase == 1])\n",
 "\n",
 "\n",
 "def ArgoPhase3(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\") # convert dt to seconds\n",
-"\n",
 "    def SecondSinkingPhase(p):\n",
 "        \"\"\"Phase 2: Sinking further to maxdepth\"\"\"\n",
-"        p.dz += vertical_speed * dt\n",
+"        p.dz += vertical_speed * particles.dt\n",
 "        p.cycle_phase = np.where(p.z + p.dz >= maxdepth, 3, p.cycle_phase)\n",
 "        p.dz = np.where(p.z + p.dz >= maxdepth, maxdepth - p.z, p.dz)\n",
 "\n",
 "    SecondSinkingPhase(particles[particles.cycle_phase == 2])\n",
 "\n",
 "\n",
 "def ArgoPhase4(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\") # convert dt to seconds\n",
-"\n",
 "    def RisingPhase(p):\n",
 "        \"\"\"Phase 3: Rising with vertical_speed until at surface\"\"\"\n",
-"        p.dz -= vertical_speed * dt\n",
+"        p.dz -= vertical_speed * particles.dt\n",
 "        p.temp = fieldset.thetao[p.time, p.z, p.lat, p.lon]\n",
 "        p.cycle_phase = np.where(p.z + p.dz <= fieldset.mindepth, 4, p.cycle_phase)\n",
 "        p.dz = np.where(\n",
@@ -100,8 +90,7 @@
 "\n",
 "\n",
 "def ArgoPhase6(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\") # convert dt to seconds\n",
-"    particles.cycle_age += dt # update cycle_age"
+"    particles.cycle_age += particles.dt # update cycle_age"
 ]
 },
 {
@@ -211,11 +200,7 @@
 "x = ds_particles[\"lon\"][:].squeeze()\n",
 "y = ds_particles[\"lat\"][:].squeeze()\n",
 "z = ds_particles[\"z\"][:].squeeze()\n",
-"time = (\n",
-"    (ds_particles[\"time\"][:].squeeze() - fieldset.time_interval.left).astype(float)\n",
-"    / 1e9\n",
-"    / 86400\n",
-") # convert time to days\n",
+"time = ds_particles[\"time\"][:].squeeze()\n",
 "temp = ds_particles[\"temp\"][:].squeeze()"
 ]
 },
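The plotting cell above now uses the particle times as stored instead of manually converting datetime64 nanoseconds to days. If days are still wanted for the time axis, a sketch like the following would do it, assuming the stored times are floats in seconds (the representation this PR uses internally); the values are illustrative:

```python
import numpy as np

time_seconds = np.array([0.0, 43200.0, 86400.0, 172800.0])  # hypothetical particle times
time_days = time_seconds / 86400.0  # one day is 86400 seconds

print(time_days)  # [0.  0.5 1.  2. ]
```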
11 changes: 6 additions & 5 deletions docs/user_guide/examples/tutorial_diffusion.ipynb
@@ -265,7 +265,6 @@
 "outputs": [],
 "source": [
 "testParticles = get_test_particles()\n",
-"testParticles.update_dt_dtype(\"timedelta64[ms]\")\n",
 "output_file = parcels.ParticleFile(\n",
 "    store=\"M1_out.zarr\",\n",
 "    chunks=(len(testParticles), 50),\n",
@@ -345,7 +344,6 @@
 "outputs": [],
 "source": [
 "testParticles = get_test_particles()\n",
-"testParticles.update_dt_dtype(\"timedelta64[ms]\")\n",
 "output_file = parcels.ParticleFile(\n",
 "    store=\"EM_out.zarr\",\n",
 "    chunks=(len(testParticles), 50),\n",
@@ -431,7 +429,6 @@
 "outputs": [],
 "source": [
 "def smagdiff(particles, fieldset):\n",
-"    dt = particles.dt / np.timedelta64(1, \"s\")\n",
 "    dx = 0.01\n",
 "    # gradients are computed by using a local central difference.\n",
 "    updx, vpdx = fieldset.UV[\n",
@@ -458,8 +455,12 @@
 "    A = A / sq_deg_to_sq_m\n",
 "    Kh = fieldset.Cs * A * np.sqrt(dudx**2 + 0.5 * (dudy + dvdx) ** 2 + dvdy**2)\n",
 "\n",
-"    dlat = np.random.normal(0.0, 1.0, dt.shape) * np.sqrt(2 * np.fabs(dt) * Kh)\n",
-"    dlon = np.random.normal(0.0, 1.0, dt.shape) * np.sqrt(2 * np.fabs(dt) * Kh)\n",
+"    dlat = np.random.normal(0.0, 1.0, particles.dt.shape) * np.sqrt(\n",
+"        2 * np.fabs(particles.dt) * Kh\n",
+"    )\n",
+"    dlon = np.random.normal(0.0, 1.0, particles.dt.shape) * np.sqrt(\n",
+"        2 * np.fabs(particles.dt) * Kh\n",
+"    )\n",
 "    particles.dlat += dlat\n",
 "    particles.dlon += dlon"
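For reference, the random-walk displacement that `smagdiff` applies above is

$$\Delta x = R\,\sqrt{2\,K_h\,\lvert\Delta t\rvert},\qquad R \sim \mathcal{N}(0,1),$$

drawn independently per particle and per component, where $K_h$ is the Smagorinsky diffusivity. Using $\lvert\Delta t\rvert$ (with `particles.dt` now a plain float in seconds) keeps the square root well defined when integrating backward in time with a negative `dt`.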
4 changes: 3 additions & 1 deletion src/parcels/_core/index_search.py
@@ -6,6 +6,7 @@
 import numpy as np
 
 from parcels._core.statuscodes import _raise_outside_time_interval_error
+from parcels._core.utils.time import timedelta_to_float
 
 if TYPE_CHECKING:
     from parcels._core.field import Field
@@ -85,7 +86,8 @@ def _search_time_index(field: Field, time: datetime):
     if not field.time_interval.is_all_time_in_interval(time):
         _raise_outside_time_interval_error(time, field=None)
 
-    ti, tau = _search_1d_array(field.data.time.data, time)
+    time_flt = timedelta_to_float(field.data.time.data - field.time_interval.left)
+    ti, tau = _search_1d_array(time_flt, time)
 
     return {"T": {"index": np.atleast_1d(ti), "bcoord": np.atleast_1d(tau)}}
 
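`timedelta_to_float` turns the field's time axis into float offsets (relative to `time_interval.left`) before the index search. A minimal sketch of what such a helper can look like; the real implementation in `parcels._core.utils.time` may differ, for example in which input types it accepts:

```python
import numpy as np


def timedelta_to_float(td):
    """Convert np.timedelta64 values (scalar or array) to float seconds."""
    return np.asarray(td) / np.timedelta64(1, "s")


# Example: offsets of a field's time axis relative to the start of its time interval
offsets = np.array([0, 3600, 7200], dtype="timedelta64[s]")
print(timedelta_to_float(offsets))  # [   0. 3600. 7200.]
```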
6 changes: 6 additions & 0 deletions src/parcels/_core/kernel.py
@@ -123,6 +123,10 @@ def PositionUpdate(particles, fieldset): # pragma: no cover
             particles.dlat = 0
             particles.dz = 0
 
+            if hasattr(self.fieldset, "RK45_tol"):
+                # Update dt in case it's increased in RK45 kernel
+                particles.dt = particles.next_dt
+
         self._pyfuncs = (PositionUpdate + self)._pyfuncs
 
     def check_fieldsets_in_kernels(self, pyfunc): # TODO v4: this can go into another method? assert_is_compatible()?
@@ -138,6 +142,8 @@ def check_fieldsets_in_kernels(self, pyfunc): # TODO v4: this can go into another method? assert_is_compatible()?
             if self._fieldset.U.grid._gtype not in [GridType.CurvilinearZGrid, GridType.RectilinearZGrid]:
                 raise NotImplementedError("Analytical Advection only works with Z-grids in the vertical")
         elif pyfunc is AdvectionRK45:
+            if "next_dt" not in [v.name for v in self.ptype.variables]:
+                raise ValueError('ParticleClass requires a "next_dt" for AdvectionRK45 Kernel.')
             if not hasattr(self.fieldset, "RK45_tol"):
                 warnings.warn(
                     "Setting RK45 tolerance to 10 m. Use fieldset.add_constant('RK45_tol', [distance]) to change.",
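With this check, AdvectionRK45 refuses to run unless the particle class carries a `next_dt` variable; the adaptive step size is written into `next_dt` and copied back to `dt` in `PositionUpdate`. A hypothetical sketch of how a user might satisfy the check; the `add_variable`-style call and the exact v4 API for extending a particle class are assumptions, not confirmed by this diff:

```python
import numpy as np

import parcels

# Hypothetical: extend the default particle with the "next_dt" variable that the
# AdvectionRK45 check above requires, initialised to match the intended dt (in seconds).
RK45Particle = parcels.Particle.add_variable(
    parcels.Variable("next_dt", dtype=np.float64, initial=60.0, to_write=False)
)
```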
35 changes: 6 additions & 29 deletions src/parcels/_core/particle.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import enum
 import operator
 from typing import Literal
 
@@ -15,8 +14,6 @@
 __all__ = ["KernelParticle", "Particle", "ParticleClass", "Variable"]
 _TO_WRITE_OPTIONS = [True, False, "once"]
 
-_SAME_AS_FIELDSET_TIME_INTERVAL = enum.Enum("_SAME_AS_FIELDSET_TIME_INTERVAL", "VALUE")
-
 
 class Variable:
     """Descriptor class that delegates data access to particle data.
@@ -40,7 +37,7 @@ class Variable:
     def __init__(
         self,
         name,
-        dtype: np.dtype | _SAME_AS_FIELDSET_TIME_INTERVAL = np.float32,
+        dtype: np.dtype = np.float32,
         initial=0,
         to_write: bool | Literal["once"] = True,
         attrs: dict | None = None,
@@ -50,8 +47,7 @@ def __init__(
         try:
             dtype = np.dtype(dtype)
         except (TypeError, ValueError) as e:
-            if dtype is not _SAME_AS_FIELDSET_TIME_INTERVAL.VALUE:
-                raise TypeError(f"Variable dtype must be a valid numpy dtype. Got {dtype=!r}") from e
+            raise TypeError(f"Variable dtype must be a valid numpy dtype. Got {dtype=!r}") from e
 
         if to_write not in _TO_WRITE_OPTIONS:
             raise ValueError(f"to_write must be one of {_TO_WRITE_OPTIONS!r}. Got {to_write=!r}")
@@ -177,7 +173,7 @@ def get_default_particle(spatial_dtype: np.float32 | np.float64) -> ParticleClas
         Variable("dz", dtype=spatial_dtype, to_write=False),
         Variable(
             "time",
-            dtype=_SAME_AS_FIELDSET_TIME_INTERVAL.VALUE,
+            dtype=np.float64,
             attrs={"standard_name": "time", "units": "seconds", "axis": "T"},
         ),
         Variable(
@@ -190,7 +186,7 @@ def get_default_particle(spatial_dtype: np.float32 | np.float64) -> ParticleClas
             },
         ),
         Variable("obs_written", dtype=np.int32, initial=0, to_write=False),
-        Variable("dt", dtype="timedelta64[s]", initial=np.timedelta64(1, "s"), to_write=False),
+        Variable("dt", dtype=np.float64, initial=1.0, to_write=False),
         Variable("state", dtype=np.int32, initial=StatusCode.Evaluate, to_write=False),
     ]
 )
@@ -214,14 +210,7 @@ def create_particle_data(
 
     assert "ei" not in initial, "'ei' is for internal use, and is unique since is only non 1D array"
 
-    time_interval_dtype = _get_time_interval_dtype(time_interval)
-
-    dtypes = {}
-    for var in variables.values():
-        if var.dtype is _SAME_AS_FIELDSET_TIME_INTERVAL.VALUE:
-            dtypes[var.name] = time_interval_dtype
-        else:
-            dtypes[var.name] = var.dtype
+    dtypes = {var.name: var.dtype for var in variables.values()}
 
     for var_name in initial:
         if var_name not in variables:
@@ -250,20 +239,8 @@ def _create_array_for_variable(variable: Variable, nparticles: int, time_interva
     assert not isinstance(variable.initial, operator.attrgetter), (
         "This function cannot handle attrgetter initial values."
     )
-    if (dtype := variable.dtype) is _SAME_AS_FIELDSET_TIME_INTERVAL.VALUE:
-        dtype = _get_time_interval_dtype(time_interval)
     return np.full(
         shape=(nparticles,),
         fill_value=variable.initial,
-        dtype=dtype,
+        dtype=variable.dtype,
     )
-
-
-def _get_time_interval_dtype(time_interval: TimeInterval | None) -> np.dtype:
-    if time_interval is None:
-        return np.timedelta64(1, "ns")
-    time = time_interval.left
-    if isinstance(time, (np.datetime64, np.timedelta64)):
-        return time.dtype
-    else:
-        return object  # cftime objects needs to be stored as object dtype
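With the `_SAME_AS_FIELDSET_TIME_INTERVAL` sentinel gone, every `Variable` carries a concrete numpy dtype, and `time` and `dt` default to plain `float64` seconds. A small sketch of declaring variables under the new scheme, assuming `Variable` is importable from this module as shown:

```python
import numpy as np

from parcels._core.particle import Variable

age = Variable("age", dtype=np.float32, initial=0.0)  # ordinary custom variable
dt = Variable("dt", dtype=np.float64, initial=1.0, to_write=False)  # mirrors the new default

# Invalid dtypes now fail immediately; there is no sentinel escape hatch anymore
try:
    Variable("bad", dtype="not-a-dtype")
except TypeError as err:
    print(err)  # Variable dtype must be a valid numpy dtype. Got dtype='not-a-dtype'
```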