4 changes: 1 addition & 3 deletions CONTRIBUTING.rst
@@ -720,9 +720,7 @@ Source code style guide
* Do not use nested classes unless you have a very good reason to, such as
requiring a namespace or class-definition modification. Classes should live
at the top level. ``__metaclass__`` is exempt from this.
* Avoid copying memory when possible. For example, don't do
``a = a.reshape(3, 4)`` when ``a.shape = (3, 4)`` will do, and ``a = a * 3``
should be ``np.multiply(a, 3, a)``.
* Avoid copying memory when possible.
* In general, avoid all double-underscore method names: ``__something`` is
usually unnecessary.
* When writing a subclass, use the super built-in to access the super class,
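For reference, a minimal sketch (not part of the diff, array names invented) of what "avoid copying memory" means in practice: use the `out=` form of ufuncs instead of rebinding the name, and rely on `reshape` returning a view when the layout allows it.

```python
import numpy as np

a = np.arange(12, dtype="float64")

# In-place multiply: writes the result back into `a` instead of
# allocating a new array for the product.
np.multiply(a, 3, out=a)

# reshape returns a view (no data copy) whenever possible; the
# original buffer is shared.
b = a.reshape(3, 4)
assert b.base is a  # b is a view onto a's memory
```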
3 changes: 1 addition & 2 deletions yt/data_objects/construction_data_containers.py
@@ -2643,8 +2643,7 @@ def _export_ply(
f.write(b"end_header\n")
v.tofile(f)
arr["ni"][:] = 3
vi = np.arange(nv, dtype="<i")
vi.shape = (nv // 3, 3)
vi = np.arange(nv, dtype="<i").reshape(nv // 3, 3)
arr["v1"][:] = vi[:, 0]
arr["v2"][:] = vi[:, 1]
arr["v3"][:] = vi[:, 2]
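The substitution above is the pattern repeated throughout this PR. A hedged illustration of the behavioural difference (not taken from the yt sources): assigning to `.shape` mutates the array in place and raises if that is impossible without copying, whereas `.reshape()` returns a new array object and copies only when it has to.

```python
import numpy as np

nv = 12
vi = np.arange(nv, dtype="<i")

# reshape returns a (possibly viewing) new array; the original is untouched.
vi2 = vi.reshape(nv // 3, 3)
assert vi2.base is vi          # still a view, no copy for a contiguous array

# Shape assignment only works when no copy would be required;
# on a non-contiguous array it raises AttributeError.
t = vi2.T                      # transposed view, non-contiguous
try:
    t.shape = (nv,)
except AttributeError:
    pass                       # .reshape(nv) would succeed here by copying
```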
33 changes: 18 additions & 15 deletions yt/fields/field_detector.py
@@ -276,18 +276,19 @@ def fcoords(self):
np.mgrid[0 : 1 : self.nd * 1j, 0 : 1 : self.nd * 1j, 0 : 1 : self.nd * 1j]
)
if self.flat:
fc.shape = (self.nd * self.nd * self.nd, 3)
fc = fc.reshape(self.nd * self.nd * self.nd, 3)
else:
fc = fc.transpose()
return self.ds.arr(fc, units="code_length")

@property
def fcoords_vertex(self):
rng = np.random.default_rng()
fc = rng.random((self.nd, self.nd, self.nd, 8, 3))
if self.flat:
fc.shape = (self.nd * self.nd * self.nd, 8, 3)
return self.ds.arr(fc, units="code_length")
shape = (self.nd * self.nd * self.nd, 8, 3)
else:
shape = (self.nd, self.nd, self.nd, 8, 3)
rng = np.random.default_rng()
return self.ds.arr(rng.random(shape), units="code_length")

@property
def icoords(self):
@@ -297,21 +298,23 @@ def icoords(self):
0 : self.nd - 1 : self.nd * 1j,
]
if self.flat:
ic.shape = (self.nd * self.nd * self.nd, 3)
return ic.reshape(self.nd * self.nd * self.nd, 3)
else:
ic = ic.transpose()
return ic
return ic.transpose()

@property
def ires(self):
ir = np.ones(self.nd**3, dtype="int64")
if not self.flat:
ir.shape = (self.nd, self.nd, self.nd)
return ir
if self.flat:
shape = (self.nd**3,)
else:
shape = (self.nd, self.nd, self.nd)
return np.ones(shape, dtype="int64")

@property
def fwidth(self):
fw = np.ones((self.nd**3, 3), dtype="float64") / self.nd
if not self.flat:
fw.shape = (self.nd, self.nd, self.nd, 3)
if self.flat:
shape = (self.nd**3, 3)
else:
shape = (self.nd, self.nd, self.nd, 3)
fw = np.full(shape, 1 / self.nd, dtype="float64")
return self.ds.arr(fw, units="code_length")
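One way to read the `fwidth` change, sketched here with an invented `nd`: building the constant-valued array directly with `np.full` avoids the temporary produced by dividing a ones array.

```python
import numpy as np

nd = 4

# Old style: allocates a ones array, then a second array for the division.
fw_old = np.ones((nd**3, 3), dtype="float64") / nd

# New style: a single allocation filled with the final value.
fw_new = np.full((nd**3, 3), 1 / nd, dtype="float64")

assert np.array_equal(fw_old, fw_new)
```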
3 changes: 1 addition & 2 deletions yt/fields/geometric_fields.py
@@ -134,8 +134,7 @@ def _morton_index(data):
data["index", "z"].ravel(),
LE,
RE,
)
morton.shape = data["index", "x"].shape
).reshape(data["index", "x"].shape)
return morton.view("f8")

registry.add_field(
9 changes: 4 additions & 5 deletions yt/frontends/amrvac/datfile_utils.py
@@ -152,11 +152,10 @@ def get_single_block_data(istream, byte_offset, block_shape):
def get_single_block_field_data(istream, byte_offset, block_shape, field_idx):
"""retrieve a specific block (ONE field) from a datfile"""
# compute byte size of a single field
field_shape = block_shape[:-1]
fmt = ALIGN + np.prod(field_shape) * "d"
field_shape = block_shape[:-1][::-1]
count = np.prod(field_shape)
fmt = ALIGN + count * "d"
byte_size_field = struct.calcsize(fmt)

istream.seek(byte_offset + byte_size_field * field_idx)
data = np.fromfile(istream, "=f8", count=np.prod(field_shape))
data.shape = field_shape[::-1]
return data.T
return np.fromfile(istream, dtype="=f8", count=count).reshape(field_shape).T
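A sketch of the reshape-then-transpose idiom used here for column-major (Fortran-ordered) block data; the sizes are invented and `io.BytesIO` stands in for the dat file.

```python
import io

import numpy as np

# Pretend block of 2x3 doubles written in Fortran (column-major) order.
original = np.arange(6, dtype="=f8").reshape(2, 3)
stream = io.BytesIO(original.tobytes(order="F"))   # column-major byte stream

field_shape = (3, 2)                               # axes reversed on disk
count = int(np.prod(field_shape))
data = np.frombuffer(stream.getvalue(), dtype="=f8", count=count)

# Reshape with the reversed axes, then transpose back to (2, 3).
recovered = data.reshape(field_shape).T
assert np.array_equal(recovered, original)
```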
6 changes: 2 additions & 4 deletions yt/frontends/amrvac/io.py
@@ -103,14 +103,12 @@ def _read_data(self, fid, grid, field):
offset = grid._index.block_offsets[ileaf]
field_idx = self.ds.parameters["w_names"].index(field)

field_shape = self.block_shape[:-1]
field_shape = self.block_shape[:-1][::-1]
count = np.prod(field_shape)
byte_size_field = count * 8 # size of a double

fid.seek(offset + byte_size_field * field_idx)
data = np.fromfile(fid, "=f8", count=count)
data.shape = field_shape[::-1]
data = data.T
data = np.fromfile(fid, dtype="=f8", count=count).reshape(field_shape).T
# Always convert data to 3D, as grid.ActiveDimensions is always 3D
while len(data.shape) < 3:
data = data[..., np.newaxis]
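And a small hedged sketch of the trailing-axis padding that follows the read, so 1D or 2D blocks end up with three axes as the comment describes; the block shape is invented.

```python
import numpy as np

data = np.zeros((8, 4))          # e.g. a 2D block from a 2D simulation

# Append length-1 axes until the array is 3D, matching ActiveDimensions.
while data.ndim < 3:
    data = data[..., np.newaxis]

assert data.shape == (8, 4, 1)
```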
2 changes: 1 addition & 1 deletion yt/frontends/chimera/data_structures.py
@@ -103,7 +103,7 @@ def _initialize_mesh(self):
mylog.warning(
"Yin-Yang File Detected; This data is not currently supported."
)
coords.shape = (nxd * nyd * nzd, 3)
coords = coords.reshape(nxd * nyd * nzd, 3)
# Connectivity is an array of rows, each of which corresponds to a grid cell.
# The 8 elements of each row are integers representing the cell vertices.
# These integers reference the numerical index of the element of the
2 changes: 1 addition & 1 deletion yt/frontends/exodus_ii/data_structures.py
@@ -361,7 +361,7 @@ def _read_connectivity(self):
raise NotImplementedError("only equal-size polyhedra supported")
q, r = np.divmod(len(conn), npe)
assert r == 0
conn.shape = (q, npe)
conn = conn.reshape(q, npe)
connectivity.append(conn)
return connectivity

7 changes: 4 additions & 3 deletions yt/frontends/gadget/io.py
@@ -462,10 +462,11 @@ def _yield_coordinates(self, data_file, needed_ptype=None):
if needed_ptype is not None and ptype != needed_ptype:
continue
# The first total_particles * 3 values are positions
pp = np.fromfile(f, dtype=dt, count=count * 3).astype(
dt_native, copy=False
pp = (
np.fromfile(f, dtype=dt, count=count * 3)
.reshape(count, 3)
.astype(dt_native, copy=False)
)
pp.shape = (count, 3)
yield ptype, pp

def _get_smoothing_length(self, data_file, position_dtype, position_shape):
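A brief sketch (with an in-memory buffer rather than a Gadget file) of why the chained call keeps at most one copy: `reshape` on freshly read data is a view, and `astype(..., copy=False)` copies only when the dtype actually changes.

```python
import numpy as np

count = 5
raw = np.arange(count * 3, dtype="float32").astype(">f4").tobytes()  # big-endian "on disk"

pp = (
    np.frombuffer(raw, dtype=">f4")
    .reshape(count, 3)                 # a view onto the buffer, no copy yet
    .astype("<f4", copy=False)         # copies once, because the target dtype differs
)
assert pp.shape == (count, 3)

# When the stored dtype already matches the native one, copy=False skips the copy.
native = np.arange(count * 3, dtype="float32")
assert native.astype("float32", copy=False) is native
```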
6 changes: 3 additions & 3 deletions yt/frontends/http_stream/io.py
@@ -37,7 +37,7 @@ def _read_particle_coords(self, chunks, ptf):
for ptype in ptf:
s = self._open_stream(data_file, (ptype, "Coordinates"))
c = np.frombuffer(s, dtype="float64")
c.shape = (c.shape[0] / 3.0, 3)
c = c.reshape(c.size // 3, 3)
yield ptype, (c[:, 0], c[:, 1], c[:, 2]), 0.0

def _read_particle_fields(self, chunks, ptf, selector):
@@ -46,7 +46,7 @@ def _read_particle_fields(self, chunks, ptf, selector):
for ptype, field_list in sorted(ptf.items()):
s = self._open_stream(data_file, (ptype, "Coordinates"))
c = np.frombuffer(s, dtype="float64")
c.shape = (c.shape[0] / 3.0, 3)
c = c.reshape(c.size // 3, 3)
mask = selector.select_points(c[:, 0], c[:, 1], c[:, 2], 0.0)
del c
if mask is None:
@@ -55,7 +55,7 @@ def _read_particle_fields(self, chunks, ptf, selector):
s = self._open_stream(data_file, (ptype, field))
c = np.frombuffer(s, dtype="float64")
if field in self._vector_fields:
c.shape = (c.shape[0] / 3.0, 3)
c = c.reshape(c.size // 3, 3)
data = c[mask, ...]
yield (ptype, field), data

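The `c.shape[0] / 3.0` form these hunks remove produces a float, which current NumPy rejects as an array dimension; a hedged sketch of the failure and of the floor-division fix, using a made-up nine-element buffer.

```python
import numpy as np

c = np.frombuffer(np.arange(9, dtype="float64").tobytes(), dtype="float64")

try:
    c.reshape(c.shape[0] / 3.0, 3)      # 3.0 is a float -> TypeError
except TypeError:
    pass

coords = c.reshape(c.size // 3, 3)      # integer floor division is accepted
assert coords.shape == (3, 3)
```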
8 changes: 4 additions & 4 deletions yt/frontends/open_pmd/io.py
@@ -189,13 +189,13 @@ def _read_fluid_selection(self, chunks, selector, fields, size):
continue
component = fname.replace("_", "/").replace("-", "_")
if component.split("/")[0] not in grid.ftypes:
data = np.full(grid.ActiveDimensions, 0, dtype=np.float64)
data = np.full_like(mask, 0)
else:
data = get_component(ds, component, grid.findex, grid.foffset)
# Workaround - casts a 2D (x,y) array to 3D (x,y,1)
data = data.reshape(mask.shape)

# The following is a modified AMRGridPatch.select(...)
data.shape = (
mask.shape
) # Workaround - casts a 2D (x,y) array to 3D (x,y,1)
count = grid.count(selector)
rv[field][ind[field] : ind[field] + count] = data[mask]
ind[field] += count
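A sketch of the 2D-to-3D workaround mentioned in the comment, with invented sizes: reshaping into the selection mask's shape simply adds the length-1 z axis so the boolean indexing lines up, without copying the data.

```python
import numpy as np

mask = np.ones((16, 16, 1), dtype=bool)                        # grid masks are always 3D
data = np.arange(16 * 16, dtype="float64").reshape(16, 16)     # 2D (x, y) slab

data = data.reshape(mask.shape)          # -> (16, 16, 1), still a view
selected = data[mask]
assert selected.size == 256
```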
24 changes: 17 additions & 7 deletions yt/frontends/stream/data_structures.py
@@ -4,7 +4,7 @@
import weakref
from collections import UserDict
from functools import cached_property
from itertools import chain, product, repeat
from itertools import chain, repeat
from numbers import Number as numeric_type

import numpy as np
@@ -650,10 +650,19 @@ def exists(fname):
self.num_neighbors = n_neighbors


_cis = np.fromiter(
chain.from_iterable(product([0, 1], [0, 1], [0, 1])), dtype=np.int64, count=8 * 3
_cis = np.array(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
],
dtype="int64",
)
_cis.shape = (8, 3)


def hexahedral_connectivity(xgrid, ygrid, zgrid):
@@ -713,9 +722,10 @@ def hexahedral_connectivity(xgrid, ygrid, zgrid):
coords[:, :, :, 0] = xgrid[:, None, None]
coords[:, :, :, 1] = ygrid[None, :, None]
coords[:, :, :, 2] = zgrid[None, None, :]
coords.shape = (nx * ny * nz, 3)
cycle = np.rollaxis(np.indices((nx - 1, ny - 1, nz - 1)), 0, 4)
cycle.shape = ((nx - 1) * (ny - 1) * (nz - 1), 3)
coords = coords.reshape(nx * ny * nz, 3)
cycle = np.rollaxis(np.indices((nx - 1, ny - 1, nz - 1)), 0, 4).reshape(
(nx - 1) * (ny - 1) * (nz - 1), 3
)
off = _cis + cycle[:, np.newaxis]
connectivity = np.array(
((off[:, :, 0] * ny) + off[:, :, 1]) * nz + off[:, :, 2], order="C"
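As a check on the new literal table (a sketch, not part of the diff): the eight rows are exactly the binary corner offsets that `itertools.product` used to generate, in the same order.

```python
from itertools import product

import numpy as np

_cis = np.array(
    [
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 0],
        [0, 1, 1],
        [1, 0, 0],
        [1, 0, 1],
        [1, 1, 0],
        [1, 1, 1],
    ],
    dtype="int64",
)

old = np.array(list(product([0, 1], repeat=3)), dtype="int64")
assert np.array_equal(_cis, old)
```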
12 changes: 4 additions & 8 deletions yt/utilities/linear_interpolators.py
@@ -62,8 +62,7 @@ def __call__(self, data_object):

my_vals = np.zeros(x_vals.shape, dtype="float64")
lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
my_vals.shape = orig_shape
return my_vals
return my_vals.reshape(orig_shape)


class BilinearFieldInterpolator:
@@ -141,8 +140,7 @@ def __call__(self, data_object):
lib.BilinearlyInterpolate(
self.table, x_vals, y_vals, self.x_bins, self.y_bins, x_i, y_i, my_vals
)
my_vals.shape = orig_shape
return my_vals
return my_vals.reshape(orig_shape)


class TrilinearFieldInterpolator:
@@ -242,8 +240,7 @@ def __call__(self, data_object):
z_i,
my_vals,
)
my_vals.shape = orig_shape
return my_vals
return my_vals.reshape(orig_shape)


class QuadrilinearFieldInterpolator:
@@ -354,8 +351,7 @@ def __call__(self, data_object):
w_i,
my_vals,
)
my_vals.shape = orig_shape
return my_vals
return my_vals.reshape(orig_shape)


def get_centers(ds, filename, center_cols, radius_col, unit="1"):
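All four interpolators follow the same flatten-interpolate-restore pattern; a hedged sketch in plain NumPy (with `np.interp` standing in for the compiled `lib` call, and invented bins) to show the shape round-trip that `reshape(orig_shape)` closes.

```python
import numpy as np

x_bins = np.linspace(0.0, 1.0, 16)
table = x_bins**2                      # values tabulated at the bin edges

rng = np.random.default_rng()
field = rng.random((4, 4, 4))          # the field from the data object, any shape
orig_shape = field.shape

x_vals = field.ravel()                 # interpolate on a flat view
my_vals = np.interp(x_vals, x_bins, table)

result = my_vals.reshape(orig_shape)   # restore the original field shape
assert result.shape == orig_shape
```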
10 changes: 5 additions & 5 deletions yt/visualization/image_writer.py
@@ -142,10 +142,11 @@ def write_bitmap(bitmap_array, filename, max_val=None, transpose=False):
if bitmap_array.dtype != np.uint8:
s1, s2 = bitmap_array.shape[:2]
if bitmap_array.shape[-1] == 3:
alpha_channel = 255 * np.ones((s1, s2, 1), dtype="uint8")
alpha_channel = np.full((s1, s2, 1), 255, dtype="uint8")
else:
alpha_channel = (255 * bitmap_array[:, :, 3]).astype("uint8")
alpha_channel.shape = s1, s2, 1
alpha_channel = (
(255 * bitmap_array[:, :, 3]).astype("uint8").reshape(s1, s2, 1)
)
if max_val is None:
max_val = bitmap_array[:, :, :3].max()
if max_val == 0.0:
@@ -260,8 +261,7 @@ def map_to_colors(buff, cmap_name):
shape = buff.shape
# We add float_eps so that digitize doesn't go out of bounds
x = np.mgrid[0.0 : 1.0 + np.finfo(np.float32).eps : lut[0].shape[0] * 1j]
inds = np.digitize(buff.ravel(), x)
inds.shape = (shape[0], shape[1])
inds = np.digitize(buff.ravel(), x).reshape(shape[0], shape[1])
mapped = np.dstack([(v[inds] * 255).astype("uint8") for v in lut])
del inds
else:
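Finally, a sketch of the digitize-and-reshape colormap lookup that the last hunk rewrites, using a toy single-channel lookup table rather than a real Matplotlib colormap.

```python
import numpy as np

buff = np.random.default_rng().random((8, 8))
shape = buff.shape

# Toy LUT: one channel, sampled at a handful of points on [0, 1].
lut = (np.linspace(0.0, 1.0, 5),)
x = np.mgrid[0.0 : 1.0 + np.finfo(np.float32).eps : lut[0].shape[0] * 1j]

# Bin each pixel into the LUT, then restore the 2D image shape.
inds = np.digitize(buff.ravel(), x).reshape(shape[0], shape[1])
mapped = np.dstack([(v[inds] * 255).astype("uint8") for v in lut])
assert mapped.shape == (8, 8, 1)
```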