|
| 1 | +""" |
| 2 | +Tests for Dyablo frontend. |
| 3 | +""" |
| 4 | + |
| 5 | +import os |
| 6 | + |
| 7 | +import h5py |
| 8 | +import numpy as np |
| 9 | +import numpy.typing as npt |
| 10 | +import pytest |
| 11 | + |
| 12 | +import yt |
| 13 | +from yt.utilities.lib.geometry_utils import compute_morton |
| 14 | + |
| 15 | + |
def create_test_file(
    tmpdir,
    n_blocks: npt.NDArray[np.int_],
    block_size: npt.NDArray[np.int_],
    left_edge: npt.NDArray[np.float64],
    right_edge: npt.NDArray[np.float64],
    seed: int = 42,
):
    """Create a small test HDF5 file for Dyablo.

    Writes ``test_dyablo_iter<iteration>.h5`` into *tmpdir* containing:
    six random per-cell fluid datasets (``rho``, ``rho_vx``, ``rho_vy``,
    ``rho_vz``, ``e_tot``, ``test_field``), the ``coordinates`` of the
    block vertices, the 8-vertex ``connectivity`` of every cell, and a
    ``scalar_data`` group with ``aexp``/``iter``/``time`` attributes.

    Parameters
    ----------
    tmpdir : str
        Directory in which the HDF5 file is written.
    n_blocks : array-like of 3 ints
        Number of blocks along each axis.
    block_size : array-like of 3 ints
        Number of cells per block along each axis.
    left_edge, right_edge : array-like of 3 floats
        Physical bounds of the domain.
    seed : int, optional
        Seed for the RNG that fills the fluid fields (default 42).
    """
    # Accept plain lists as well as ndarrays.
    n_blocks = np.asarray(n_blocks)
    block_size = np.asarray(block_size)
    left_edge = np.asarray(left_edge)
    right_edge = np.asarray(right_edge)

    # Arbitrary iteration number; it is embedded both in the filename and
    # in the scalar_data attributes below.
    iteration = 1234567

    fluid_fname = os.path.join(tmpdir, f"test_dyablo_iter{iteration}.h5")

    # Total number of cells in the domain (blocks * cells-per-block).
    n_cells = np.prod(n_blocks) * np.prod(block_size)

    generator = np.random.default_rng(seed=seed)

    # Compute vertices of the blocks.
    # Shape (3, Nx+1, Ny+1, Nz+1) where Ni = block_size[i] * n_blocks[i]:
    # a uniform vertex lattice spanning [left_edge, right_edge].
    vertex_coordinates = np.mgrid[
        left_edge[0] : right_edge[0] : (block_size[0] * n_blocks[0] + 1) * 1j,
        left_edge[1] : right_edge[1] : (block_size[1] * n_blocks[1] + 1) * 1j,
        left_edge[2] : right_edge[2] : (block_size[2] * n_blocks[2] + 1) * 1j,
    ]

    # Block positions (cell centers of the *block* grid).
    # block_inds has shape (3, *n_blocks); block_positions holds each
    # block's center coordinate along each axis.
    dx = (right_edge - left_edge) / n_blocks
    block_inds = np.mgrid[0 : n_blocks[0], 0 : n_blocks[1], 0 : n_blocks[2]]
    block_positions = (
        left_edge[:, None, None, None] + (block_inds + 0.5) * dx[:, None, None, None]
    )

    # Compute block order **along Morton curve**
    # Dyablo's Morton order uses x as the least-significant bit; yt's
    # compute_morton uses z as LSB, so we swap x and z (and reverse the
    # domain edges to match the swapped coordinate order).
    morton_index = compute_morton(
        block_positions[2].flatten(),
        block_positions[1].flatten(),
        block_positions[0].flatten(),
        left_edge[::-1],
        right_edge[::-1],
    )
    block_order = np.argsort(morton_index)

    # Compute cell ordering within each block **in column-major (Fortran) order**.
    # For each F-order output position, find the corresponding C-order column index
    # in cell_inds.reshape(3,-1). This is the F→C permutation (not C→F).
    ix_f, iy_f, iz_f = np.unravel_index(
        np.arange(np.prod(block_size)), block_size, order="F"
    )

    # Offset w.r.t. the bottom-left-front corner of each cell.
    # Columns enumerate the 8 corners of a hexahedron: bottom face first,
    # then top face — presumably Dyablo's connectivity convention
    # (NOTE(review): confirm against the frontend's reader).
    vertex_offset = np.asarray(
        [
            [0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 0, 1],
            [1, 1, 1],
            [0, 1, 1],
        ]
    ).T

    # Broadcast to shape (3, n_total_blocks, cells_per_block, 8):
    # block origin (in cell units) + in-block cell offset + corner offset.
    vertex_inds = (
        block_inds.reshape(3, -1)[:, block_order][:, :, None, None]
        * block_size[:, None, None, None]
        + np.array([ix_f, iy_f, iz_f])[:, None, :, None]
        + vertex_offset[:, None, None, :]
    )
    # Flatten (i, j, k) vertex indices into flat indices of the C-ordered
    # vertex lattice built above.
    vertex_order = np.ravel_multi_index(
        vertex_inds, vertex_coordinates.shape[1:], order="C"
    )

    with h5py.File(fluid_fname, "w") as f:
        # Create fluid fields: one random value per cell, in file order.
        f["rho"] = generator.random(n_cells)
        f["rho_vx"] = generator.random(n_cells)
        f["rho_vy"] = generator.random(n_cells)
        f["rho_vz"] = generator.random(n_cells)
        f["e_tot"] = generator.random(n_cells)
        f["test_field"] = generator.random(n_cells)

        # Create vertices of the blocks: (n_vertices, 3) coordinate table
        # plus (n_cells, 8) per-cell connectivity into that table.
        f["coordinates"] = vertex_coordinates.reshape(3, -1).T
        f["connectivity"] = vertex_order.reshape(-1, 8)

        # Create scalar_data (metadata the frontend reads at load time).
        f.create_group("scalar_data")
        f["scalar_data"].attrs.update({"aexp": 1.0, "iter": iteration, "time": 123.0})
| 112 | + |
# Expected fluid fields written by create_test_file; names must match the
# HDF5 datasets it creates.
_FLUID_FIELDS = ["rho", "rho_vx", "rho_vy", "rho_vz", "e_tot", "test_field"]
| 115 | + |
| 116 | + |
| 117 | +@pytest.mark.parametrize( |
| 118 | + "n_blocks, block_size", |
| 119 | + [ |
| 120 | + # cubic, minimal 2×2×2 (block_size=1 is ambiguous to inference, start from 2) |
| 121 | + ([2, 2, 2], [2, 2, 2]), |
| 122 | + ([2, 2, 2], [4, 4, 4]), |
| 123 | + # non-cubic (N≠M≠L), uniform n_blocks |
| 124 | + ([4, 4, 4], [3, 4, 5]), |
| 125 | + # asymmetric n_blocks (all dims ≥ 2; avoid z-neighbor-as-block-1 orderings) |
| 126 | + ([3, 4, 6], [4, 4, 4]), |
| 127 | + ([2, 4, 6], [3, 5, 7]), |
| 128 | + ], |
| 129 | +) |
| 130 | +def test_dyablo_mock_dataset(tmp_path, n_blocks, block_size): |
| 131 | + n_blocks = np.asarray(n_blocks) |
| 132 | + block_size = np.asarray(block_size) |
| 133 | + |
| 134 | + create_test_file( |
| 135 | + str(tmp_path), |
| 136 | + n_blocks=n_blocks, |
| 137 | + block_size=block_size, |
| 138 | + left_edge=np.zeros(3), |
| 139 | + right_edge=np.ones(3), |
| 140 | + ) |
| 141 | + |
| 142 | + fpath = next(tmp_path.glob("*.h5")) |
| 143 | + yt.mylog.setLevel(50) |
| 144 | + ds = yt.load(str(fpath)) |
| 145 | + |
| 146 | + # Check inferred block_size and n_blocks |
| 147 | + np.testing.assert_array_equal( |
| 148 | + ds.block_size, |
| 149 | + block_size, |
| 150 | + err_msg=f"block_size mismatch: got {ds.block_size}, expected {block_size}", |
| 151 | + ) |
| 152 | + np.testing.assert_array_equal( |
| 153 | + ds.domain_dimensions, |
| 154 | + n_blocks * block_size, |
| 155 | + err_msg=f"domain_dimensions mismatch: got {ds.domain_dimensions}, expected {n_blocks * block_size}", |
| 156 | + ) |
| 157 | + |
| 158 | + ad = ds.all_data() |
| 159 | + expected_n_cells = int(np.prod(n_blocks) * np.prod(block_size)) |
| 160 | + |
| 161 | + # Check all fluid fields are readable and have correct size |
| 162 | + for fname in _FLUID_FIELDS: |
| 163 | + data = ad["dyablo", fname] |
| 164 | + assert data.shape == (expected_n_cells,), ( |
| 165 | + f"Field {fname}: expected {expected_n_cells} cells, got {data.shape}" |
| 166 | + ) |
| 167 | + assert not np.any(np.isnan(data)), f"Field {fname} contains NaN values" |
| 168 | + |
| 169 | + # Check derived density field |
| 170 | + rho = ad["gas", "density"] |
| 171 | + assert rho.shape == (expected_n_cells,) |
| 172 | + assert not np.any(np.isnan(rho)) |
| 173 | + |
| 174 | + |
def test_dyablo_mock_dataset_edges(tmp_path):
    """Test that arbitrary left_edge and right_edge are preserved correctly."""
    n_blocks = np.asarray([2, 2, 2])
    block_size = np.asarray([4, 4, 4])
    left_edge = np.array([-3.0, 0.5, 1.0])
    right_edge = np.array([5.0, 4.5, 7.0])

    create_test_file(
        str(tmp_path),
        n_blocks=n_blocks,
        block_size=block_size,
        left_edge=left_edge,
        right_edge=right_edge,
    )

    # Silence yt logging (50 == CRITICAL) and load the freshly written file.
    yt.mylog.setLevel(50)
    ds = yt.load(str(next(tmp_path.glob("*.h5"))))

    # The inferred domain bounds must match what was written to the file.
    np.testing.assert_allclose(ds.domain_left_edge.d, left_edge)
    np.testing.assert_allclose(ds.domain_right_edge.d, right_edge)

    density = ds.all_data()["gas", "density"]
    n_cells = int(np.prod(n_blocks * block_size))
    assert density.shape == (n_cells,)
    assert not np.isnan(density).any()
0 commit comments