Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 27 additions & 5 deletions yt/data_objects/data_containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -317,30 +317,46 @@ def _generate_spatial_fluid(self, field, ngz):
if finfo.units is None:
raise YTSpatialFieldUnitError(field)
units = finfo.units
rv = None
try:
rv = self.ds.arr(np.zeros(self.ires.size, dtype="float64"), units)
total_size = self.ires.size
accumulate = False
except YTNonIndexedDataContainer:
# In this case, we'll generate many tiny arrays of unknown size and
# then concatenate them.
outputs = []
accumulate = True

ind = 0
if ngz == 0:
deps = self._identify_dependencies([field], spatial=True)
deps = self._determine_fields(deps)
for _io_chunk in self.chunks([], "io", cache=False):
for _chunk in self.chunks([], "spatial", ngz=0, preload_fields=deps):
o = self._current_chunk.objs[0]
if accumulate:
rv = self.ds.arr(np.empty(o.ires.size, dtype="float64"), units)
outputs.append(rv)
ind = 0 # Does this work with mesh?
with o._activate_cache():
source = self[field]
if accumulate:
if finfo.vector_field:
shape = (o.ires.size, source.shape[-1])
else:
shape = (o.ires.size,)
rv = self.ds.arr(np.empty(shape, dtype="float64"), units)
outputs.append(rv)
ind = 0 # Does this work with mesh?
elif rv is None:
if finfo.vector_field:
shape = (total_size, source.shape[-1])
else:
shape = (total_size,)
rv = self.ds.arr(np.empty(shape, dtype="float64"), units)

ind += o.select(
self.selector, source=self[field], dest=rv, offset=ind
)
else:
if finfo.vector_field:
raise NotImplementedError("Ghost zones not supported for vector fields")
chunks = self.index._chunk(self, "spatial", ngz=ngz)
for chunk in chunks:
with self._chunked_read(chunk):
Expand All @@ -352,6 +368,12 @@ def _generate_spatial_fluid(self, field, ngz):
np.empty(wogz.ires.size, dtype="float64"), units
)
outputs.append(rv)
elif rv is None:
if finfo.vector_field:
shape = (total_size, source.shape[-1])
else:
shape = (total_size,)
rv = self.ds.arr(np.empty(shape, dtype="float64"), units)
ind += wogz.select(
self.selector,
source=gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],
Expand Down
44 changes: 38 additions & 6 deletions yt/data_objects/index_subobjects/grid_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,14 @@ def smooth(self, *args, **kwargs):
def particle_operation(self, *args, **kwargs):
raise NotImplementedError

def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
def deposit(
self,
positions,
fields=None,
method=None,
kernel_name="cubic",
vector_field=False,
):
# Here we perform our particle deposition.
cls = getattr(particle_deposit, f"deposit_{method}", None)
if cls is None:
Expand All @@ -365,10 +372,27 @@ def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
# inside this is Fortran ordered because of the ordering in the
# octree deposit routines, so we reverse it here to match the
# convention there

nvals = tuple(self.ActiveDimensions[::-1])
# append a dummy dimension because we are only depositing onto
# one grid
op = cls(nvals + (1,), kernel_name)
scalar_as_vector = (
not vector_field
and method != "count"
and fields is not None
and len(fields) > 0
and getattr(fields[0], "ndim", 0) == 1
)
if scalar_as_vector:
fields = [np.ascontiguousarray(f.reshape(f.shape[0], 1)) for f in fields]

use_vector_path = vector_field or scalar_as_vector
# Append a dummy grid dimension because we only deposit onto one grid.
# For vector fields, append an additional vector-component dimension.
if use_vector_path:
vec_size = fields[0].shape[-1] if fields else 1
op = cls(nvals + (1, vec_size), kernel_name)
else:
op = cls(nvals + (1,), kernel_name)

op.initialize()
if positions.size > 0:
op.process_grid(self, positions, fields)
Expand All @@ -377,8 +401,16 @@ def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
return
# Fortran-ordered, so transpose.
vals = vals.transpose()
# squeeze dummy dimension we appended above
return np.squeeze(vals, axis=0)
if use_vector_path:
# (vec, 1, Nx, Ny, Nz) -> (Nx, Ny, Nz, vec)
vals = np.squeeze(vals, axis=1)
vals = np.moveaxis(vals, 0, -1)
if scalar_as_vector:
vals = np.squeeze(vals, axis=-1)
else:
# (1, Nx, Ny, Nz) -> (Nx, Ny, Nz)
vals = np.squeeze(vals, axis=0)
return vals

def select_blocks(self, selector):
mask = self._get_selector_mask(selector)
Expand Down
23 changes: 20 additions & 3 deletions yt/data_objects/index_subobjects/octree_subset.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,14 @@ def select_tcoords(self, dobj):
def domain_ind(self):
return self.oct_handler.domain_ind(self.selector)

def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
def deposit(
self,
positions,
fields=None,
method=None,
kernel_name="cubic",
vector_field=False,
):
r"""Operate on the mesh, in a particle-against-mesh fashion, with
exclusively local input.

Expand Down Expand Up @@ -168,12 +175,20 @@ def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
# Here we perform our particle deposition.
if fields is None:
fields = []

cls = getattr(particle_deposit, f"deposit_{method}", None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
nz = self.nz
nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
if np.max(self.domain_ind) >= nvals[-1]:
if vector_field:
vec_size = fields[0].shape[-1] if fields else 1
nvals = nvals + (vec_size,)

if positions.shape[0] < 1:
vals = np.empty(nvals, dtype="float64")
return vals
if np.max(self.domain_ind) >= (self.domain_ind >= 0).sum():
print(
f"nocts, domain_ind >= 0, max {self.oct_handler.nocts} {nvals[-1]} {np.max(self.domain_ind)}"
)
Expand Down Expand Up @@ -507,8 +522,10 @@ def select_ires(self, dobj):
)

def select(self, selector, source, dest, offset):
    """Copy selected cells from *source* into *dest* starting at *offset*.

    Returns the number of values written, as reported by the oct handler.
    """
    # A 5-D source carries an extra trailing component axis (vector field);
    # otherwise fill one scalar value per cell.
    if source.ndim == 5:
        n_components = source.shape[-1]
    else:
        n_components = 1
    return self.oct_handler.selector_fill(
        selector,
        source,
        dest,
        offset,
        domain_id=self.domain_id,
        dims=n_components,
    )

Expand Down
32 changes: 27 additions & 5 deletions yt/data_objects/static_output.py
Original file line number Diff line number Diff line change
Expand Up @@ -1194,7 +1194,7 @@ def find_field_values_at_points(self, fields, coords):
funit = self._get_field_info(field).units
out.append(self.arr(np.empty((len(coords),)), funit))
for coord_index, coord in enumerate(coords):
out[field_index][coord_index] = self.point(coord)[field]
out[field_index][coord_index] = self.point(coord)[field].item()
if len(fields) == 1:
return out[0]
else:
Expand Down Expand Up @@ -1840,7 +1840,12 @@ def add_mesh_sampling_particle_field(self, sample_field, ptype="all"):
return self.index._add_mesh_sampling_particle_field(sample_field, ftype, ptype)

def add_deposited_particle_field(
self, deposit_field, method, kernel_name="cubic", weight_field=None
self,
deposit_field,
method,
kernel_name="cubic",
weight_field=None,
vector_field=False,
):
"""Add a new deposited particle field

Expand All @@ -1866,6 +1871,8 @@ def add_deposited_particle_field(
weight_field : (field_type, field_name) or None
Weighting field name for deposition method `weighted_mean`.
If None, use the particle mass.
vector_field : bool, default False
If True, the deposited field is treated as a vector field.

Returns
-------
Expand All @@ -1877,11 +1884,15 @@ def add_deposited_particle_field(
ptype, deposit_field = deposit_field[0], deposit_field[1]
else:
raise RuntimeError

if weight_field is None:
weight_field = (ptype, "particle_mass")
units = self.field_info[ptype, deposit_field].output_units
take_log = self.field_info[ptype, deposit_field].take_log
if vector_field != self.field_info[ptype, deposit_field].vector_field:
raise RuntimeError(
"vector_field argument does not match the field's vector_field attribute"
)

name_map = {
"sum": "sum",
"std": "std",
Expand Down Expand Up @@ -1911,8 +1922,18 @@ def _deposit_field(data):
fields = [data[ptype, deposit_field]]
if method == "weighted_mean":
fields.append(data[ptype, weight_field])
fields = [np.ascontiguousarray(f) for f in fields]
d = data.deposit(pos, fields, method=method, kernel_name=kernel_name)
# Cython deposition kernels operate on float64 buffers.
fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
d = data.deposit(
pos,
fields,
method=method,
kernel_name=kernel_name,
# Count deposition tracks number of particles per cell and
# should remain scalar even if the source field is vector-valued.
vector_field=vector_field and method != "count",
)

d = data.ds.arr(d, units=units)
if method == "weighted_mean":
d[np.isnan(d)] = 0.0
Expand All @@ -1925,6 +1946,7 @@ def _deposit_field(data):
units=units,
take_log=take_log,
validators=[ValidateSpatial()],
vector_field=vector_field,
)
return ("deposit", field_name)

Expand Down
6 changes: 5 additions & 1 deletion yt/fields/field_detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,11 @@ def deposit(self, *args, **kwargs):
if isinstance(self.ds, (StreamParticlesDataset, ParticleDataset)):
raise ValueError
rng = np.random.default_rng()
return rng.random((self.nd, self.nd, self.nd))
if kwargs.get("vector_field", False):
shape = (self.nd, self.nd, self.nd, 1)
else:
shape = (self.nd, self.nd, self.nd)
return rng.random(shape)

def mesh_sampling_particle_field(self, *args, **kwargs):
pos = args[0]
Expand Down
51 changes: 51 additions & 0 deletions yt/fields/tests/test_fields.py
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,9 @@ def test_add_deposited_particle_field():
else:
assert_almost_equal(ret.sum(), ad["io", "particle_mass"].sum())

# Make sure the shapes are correct
assert_equal(ret.ndim, 1)

# Test "weighted_mean" method
fn = base_ds.add_deposited_particle_field(
("io", "particle_ones"), "weighted_mean", weight_field="particle_ones"
Expand All @@ -218,6 +221,54 @@ def test_add_deposited_particle_field():
ret = ad[fn]
# The sum should equal the number of cells that have particles
assert_equal(ret.sum(), np.count_nonzero(ad["deposit", "io_count"]))
assert_equal(ret.ndim, 1)


def test_add_deposited_particle_vector_field():
    """Exercise deposition of a 10-component particle vector field."""
    ds = get_base_ds(1)
    ad = ds.all_data()

    def vector_field(data):
        # One 10-component vector per particle, every component equal to 1.
        return np.ones(data["io", "particle_mass"].shape + (10,))

    ds.add_field(
        ("io", "vector_field"),
        function=vector_field,
        units="",
        sampling_type="particle",
        vector_field=True,
    )

    # We need to mention the vector_field=True flag here
    assert_raises(
        RuntimeError,
        ds.add_deposited_particle_field,
        ("io", "vector_field"),
        method="nearest",
    )

    n_cells = len(ad["index", "dx"])

    for dep_method in ("sum", "cic", "nearest"):
        deposited = ds.add_deposited_particle_field(
            ("io", "vector_field"), method=dep_method, vector_field=True
        )
        ad = ds.all_data()
        values = ad[deposited]
        assert_equal(values.shape, (n_cells, 10))

        if dep_method == "sum":
            ref = ds.add_deposited_particle_field(("io", "particle_ones"), method="sum")
            assert_equal(values.sum(), ad[ref].sum() * 10)

    deposited = ds.add_deposited_particle_field(
        ("io", "vector_field"), method="count", vector_field=True
    )
    reference = ds.add_deposited_particle_field(("io", "particle_ones"), method="count")
    # Note here: some particles may fall outside the domain,
    # so we compare wrt the non-vectorized result
    assert_equal(ad[deposited].sum(), ad[reference].sum())


def test_add_gradient_fields():
Expand Down
6 changes: 3 additions & 3 deletions yt/frontends/artio/_artio_caller.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -1560,7 +1560,7 @@ cdef class ARTIORootMeshContainer:
nf = len(fields)
cdef np.float64_t[::cython.view.indirect, ::1] field_pointers
if nf > 0: field_pointers = OnceIndirect(fields)
cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
cdef np.float64_t[:, :] field_vals = np.empty((nf, 1), dtype="float64")
cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
mask = self.mask(selector, -1)
cdef np.ndarray[np.int64_t, ndim=1] domain_ind
Expand All @@ -1583,7 +1583,7 @@ cdef class ARTIORootMeshContainer:
cdef np.int64_t offset
for i in range(positions.shape[0]):
for j in range(nf):
field_vals[j] = field_pointers[j][i]
field_vals[j,0] = field_pointers[j][i]
for j in range(3):
pos[j] = positions[i, j]
coords[j] = <int>((pos[j] - self.DLE[j])/self.dds[j])
Expand All @@ -1598,7 +1598,7 @@ cdef class ARTIORootMeshContainer:
offset, pos, field_vals, sfc)
if pdeposit.update_values == 1:
for j in range(nf):
field_pointers[j][i] = field_vals[j]
field_pointers[j][i] = field_vals[j,0]

cdef class SFCRangeSelector(SelectorObject):

Expand Down
14 changes: 9 additions & 5 deletions yt/geometry/oct_container.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -511,21 +511,25 @@ cdef class OctreeContainer:
# iterate the same way visit_all_octs does, but we need to track the
# number of octs total visited.
cdef np.int64_t num_cells = -1

vector_field = dims > 1

if not vector_field:
source = source.reshape(np.shape(source) + (1,))

# If source is a vector field, we need to handle it differently.
if dest is None:
# Note that RAMSES can have partial refinement inside an Oct. This
# means we actually do want the number of Octs, not the number of
# cells.
num_cells = selector.count_oct_cells(self, domain_id)
dest = np.zeros((num_cells, dims), dtype=source.dtype,
order='C')
if dims != 1:
raise RuntimeError

# Just make sure that we're in the right shape. Ideally this will not
# duplicate memory. Since we're in Cython, we want to avoid modifying
# the .shape attributes directly.
dest = dest.reshape((num_cells, 1))
source = source.reshape((source.shape[0], source.shape[1],
source.shape[2], source.shape[3], dims))
dest = dest.reshape((num_cells, dims))
cdef OctVisitor visitor
cdef oct_visitors.CopyArrayI64 visitor_i64
cdef oct_visitors.CopyArrayF64 visitor_f64
Expand Down
Loading