|
| 1 | +#! /usr/bin/env python |
| 2 | +# -*- coding: utf-8 -*- |
| 3 | + |
| 4 | + |
| 5 | +import h5py |
| 6 | +import sys |
| 7 | +import numpy as np |
| 8 | +import os |
| 9 | + |
| 10 | + |
def BtoFlatPrimal(ph_bx, ph_by, ph_bz, npx, npy, npz, gn=2):
    """Convert 2D Yee-staggered B field components to a flat primal array.

    Each component is averaged along its dual direction(s) onto primal nodes,
    the resulting 2D plane is replicated on every z node (the third dimension
    is a fake extrusion of the 2D data), and each component is flattened in
    Fortran order into one column of the returned array.

    Parameters
    ----------
    ph_bx, ph_by, ph_bz : 2D ndarrays
        PHARE patch datasets including ``gn`` ghost nodes per side.
        Staggering assumed from the slicing below: bx dual in y, by dual in x,
        bz dual in both x and y -- TODO confirm against PHARE's Yee layout.
    npx, npy, npz : int
        Number of primal nodes of the destination block in each direction.
    gn : int, optional
        Number of ghost nodes in the PHARE arrays (default 2).

    Returns
    -------
    ndarray of shape (npx * npy * npz, 3), dtype float32.
    """
    nbrPoints = npx * npy * npz
    b = np.zeros((nbrPoints, 3), dtype="f")

    # pure primal arrays
    bx = np.zeros((npx, npy, npz), dtype=np.float32)
    by = np.zeros((npx, npy, npz), dtype=np.float32)
    bz = np.zeros((npx, npy, npz), dtype=np.float32)

    # converts yee to pure primal:
    # we average in the dual direction so we need one extra ghost value in
    # that direction, hence the gn-1:-gn+1 slices
    bx_plane = (
        ph_bx[gn:-gn, gn - 1 : -gn + 1][:, 1:] + ph_bx[gn:-gn, gn - 1 : -gn + 1][:, :-1]
    ) * 0.5
    by_plane = (
        ph_by[gn - 1 : -gn + 1, gn:-gn][1:, :] + ph_by[gn - 1 : -gn + 1, gn:-gn][:-1, :]
    ) * 0.5
    bz_plane = (
        ph_bz[gn - 1 : -gn + 1, gn - 1 : -gn + 1][1:, 1:]
        + ph_bz[gn - 1 : -gn + 1, gn - 1 : -gn + 1][:-1, :-1]
    ) * 0.5

    # replicate the 2D plane on every z node: the original wrote only planes
    # 0 and 1 (hard-coded npz == 2), leaving any further plane zero
    bx[:, :, :] = bx_plane[:, :, np.newaxis]
    by[:, :, :] = by_plane[:, :, np.newaxis]
    bz[:, :, :] = bz_plane[:, :, np.newaxis]

    b[:, 0] = bx.flatten(order="F")
    b[:, 1] = by.flatten(order="F")
    b[:, 2] = bz.flatten(order="F")

    return b
| 43 | + |
| 44 | + |
def EtoFlatPrimal(ph_ex, ph_ey, ph_ez, npx, npy, npz, gn=2):
    """Convert 2D Yee-staggered E field components to a flat primal array.

    ex and ey are averaged along their dual direction onto primal nodes,
    ez is already primal in 2D; the 2D plane is then replicated on every
    z node (fake third dimension) and flattened in Fortran order.

    Parameters
    ----------
    ph_ex, ph_ey, ph_ez : 2D ndarrays
        PHARE patch datasets including ``gn`` ghost nodes per side.
        Staggering assumed from the slicing below: ex dual in x, ey dual
        in y, ez primal -- TODO confirm against PHARE's Yee layout.
    npx, npy, npz : int
        Number of primal nodes of the destination block in each direction.
    gn : int, optional
        Number of ghost nodes in the PHARE arrays (default 2).

    Returns
    -------
    ndarray of shape (npx * npy * npz, 3), dtype float32.
    """
    nbrPoints = npx * npy * npz
    e = np.zeros((nbrPoints, 3), dtype="f")

    # pure primal arrays
    ex = np.zeros((npx, npy, npz), dtype=np.float32)
    ey = np.zeros((npx, npy, npz), dtype=np.float32)
    ez = np.zeros((npx, npy, npz), dtype=np.float32)

    # converts yee to pure primal:
    # we average in the dual direction so we need one extra ghost value in
    # that direction, hence the gn-1:-gn+1 slices
    ex_plane = (
        ph_ex[gn - 1 : -gn + 1, gn:-gn][1:, :] + ph_ex[gn - 1 : -gn + 1, gn:-gn][:-1, :]
    ) * 0.5

    ey_plane = (
        ph_ey[gn:-gn, gn - 1 : -gn + 1][:, 1:] + ph_ey[gn:-gn, gn - 1 : -gn + 1][:, :-1]
    ) * 0.5

    # ez already primal in 2D, just strip the ghost nodes
    ez_plane = ph_ez[gn:-gn, gn:-gn][:, :]

    # replicate the 2D plane on every z node: the original wrote only planes
    # 0 and 1 (hard-coded npz == 2), leaving any further plane zero
    ex[:, :, :] = ex_plane[:, :, np.newaxis]
    ey[:, :, :] = ey_plane[:, :, np.newaxis]
    ez[:, :, :] = ez_plane[:, :, np.newaxis]

    e[:, 0] = ex.flatten(order="F")
    e[:, 1] = ey.flatten(order="F")
    e[:, 2] = ez.flatten(order="F")

    return e
| 77 | + |
| 78 | + |
def primalScalarToFlatPrimal(ph_scalar, npx, npy, npz, gn=2):
    """Strip ghost nodes from a primal 2D scalar and flatten it to 1D.

    The ghost-stripped 2D plane is replicated on every z node (the third
    dimension is a fake extrusion of the 2D data), then flattened in
    Fortran order.

    Parameters
    ----------
    ph_scalar : 2D ndarray including ``gn`` ghost nodes per side.
    npx, npy, npz : int
        Number of primal nodes of the destination block in each direction.
    gn : int, optional
        Number of ghost nodes in the PHARE array (default 2).

    Returns
    -------
    1D ndarray of npx * npy * npz float32 values.
    """
    scalar3d = np.zeros((npx, npy, npz), dtype="f")
    # replicate the plane on every z node: the original wrote only planes
    # 0 and 1 (hard-coded npz == 2), leaving any further plane zero
    scalar3d[:, :, :] = ph_scalar[gn:-gn, gn:-gn][:, :, np.newaxis]
    return scalar3d.flatten(order="F")
| 85 | + |
| 86 | + |
def primalVectorToFlatPrimal(ph_vx, ph_vy, ph_vz, npx, npy, npz, gn=2):
    """Convert a primal 2D vector field to a flat (nbrPoints, 3) array.

    Each component is ghost-stripped, z-replicated and flattened by
    primalScalarToFlatPrimal, then stored as one column of the result.

    Parameters
    ----------
    ph_vx, ph_vy, ph_vz : 2D ndarrays including ``gn`` ghost nodes per side.
    npx, npy, npz : int
        Number of primal nodes of the destination block in each direction.
    gn : int, optional
        Number of ghost nodes in the PHARE arrays (default 2).

    Returns
    -------
    ndarray of shape (npx * npy * npz, 3), dtype float32.
    """
    nbrPoints = npx * npy * npz
    v = np.zeros((nbrPoints, 3), dtype="f")

    # primalScalarToFlatPrimal already returns flat Fortran-ordered 1D
    # arrays, so the original's extra .flatten(order="F") was redundant
    v[:, 0] = primalScalarToFlatPrimal(ph_vx, npx, npy, npz, gn)
    v[:, 1] = primalScalarToFlatPrimal(ph_vy, npx, npy, npz, gn)
    v[:, 2] = primalScalarToFlatPrimal(ph_vz, npx, npy, npz, gn)

    return v
| 100 | + |
| 101 | + |
def boxFromPatch(patch):
    """Return the AMR box [xlo, xhi, ylo, yhi, zlo, zhi] of a patch.

    Reads the patch's "lower"/"upper" HDF5 attributes; the z extent is
    collapsed to [0, 0] since the PHARE data is 2D.
    """
    lo = patch.attrs["lower"]
    hi = patch.attrs["upper"]
    return [lo[0], hi[0], lo[1], hi[1], 0, 0]  # 2D
| 106 | + |
| 107 | + |
def nbrNodes(box):
    """Return the number of primal nodes (npx, npy, npz) of an AMR box.

    ``box`` holds [xlo, xhi, ylo, yhi, zlo, zhi]; per direction the node
    count is ncells + 1 = (hi - lo + 1) + 1.
    """
    return tuple(box[2 * d + 1] - box[2 * d] + 2 for d in range(3))
| 115 | + |
| 116 | + |
def primalFlattener(diag_filename):
    """Select the yee-to-flat-primal conversion routine for a diagnostic.

    Tags are tested in this order against the file name; raises ValueError
    for an unrecognized diagnostic.
    """
    # thunks delay the converter name lookup until a tag actually matches
    dispatch = (
        ("_B", "Converting B fields", lambda: BtoFlatPrimal),
        ("_E", "Converting E fields", lambda: EtoFlatPrimal),
        ("_bulkVelocity", "Converting bulk velocity", lambda: primalVectorToFlatPrimal),
        ("_density", "Converting ion density", lambda: primalScalarToFlatPrimal),
    )
    for tag, message, get_converter in dispatch:
        if tag in diag_filename:
            print(message)
            return get_converter()
    raise ValueError(f"Unknown diagnostic {diag_filename}")
| 132 | + |
| 133 | + |
def max_nbr_levels_in(phare_h5):
    """Return the largest number of AMR levels over all time steps (0 if none)."""
    level_counts = (len(phare_h5["t"][t].keys()) for t in phare_h5["t"].keys())
    return max(level_counts, default=0)
| 142 | + |
| 143 | + |
def times_in(phare_h5):
    """Return all diagnostic times found in the file, as a sorted float array."""
    return np.sort(np.asarray([float(t) for t in phare_h5["t"].keys()]))
| 149 | + |
| 150 | + |
def is_vector_data(patch):
    """True when the patch holds three datasets (a vector field), else False."""
    nbr_datasets = len(patch.keys())
    return nbr_datasets == 3
| 153 | + |
| 154 | + |
def main():
    """Convert a PHARE 2D diagnostic HDF5 file to a VTKHDF OverlappingAMR file.

    Usage: script.py path/to/diag.h5 -- the output <name>.vtkhdf is written
    next to the input file. One "LevelN" group is created per AMR level,
    with per-step AMR box counts/offsets and a single growing PointData
    dataset accumulated over all time steps.
    """
    path = sys.argv[1]
    phare_h5 = h5py.File(path, "r")
    # root-level mesh spacing; refined levels divide it by 2**ilvl below
    root_spacing = phare_h5.attrs["cell_width"]
    times = times_in(phare_h5)
    max_nbr_level = max_nbr_levels_in(phare_h5)
    numberOfTimes = times.size

    # diagnostic name (basename without extension) selects the converter
    phare_fn = os.path.basename(path).split(".")[0]
    data_directory = os.path.dirname(path)
    vtk_fn = f"{data_directory}/{phare_fn}.vtkhdf"

    toFlatPrimal = primalFlattener(phare_fn)

    print("PHARE H5 to VTKHDF conversion")
    print("-----------------------------")
    print(f"Converting {phare_fn} to {vtk_fn} in {data_directory}")
    print(f"Max number of levels : {max_nbr_level}")
    print(f"Number of time steps : {numberOfTimes}")

    # VTKHDF root group and its mandatory attributes
    # (presumably per the VTKHDF OverlappingAMR spec v2.2 -- verify)
    vtk = h5py.File(vtk_fn, "w")
    root = vtk.create_group("VTKHDF", track_order=True)
    root.attrs["Version"] = (2, 2)
    # NOTE(review): "type" shadows the builtin; left as-is (doc-only pass)
    type = b"OverlappingAMR"
    root.attrs.create("Type", type, dtype=h5py.string_dtype("ascii", len(type)))
    description = b"XYZ"
    root.attrs.create(
        "GridDescription",
        description,
        dtype=h5py.string_dtype("ascii", len(description)),
    )
    origin = [0, 0, 0]
    root.attrs.create("Origin", origin, dtype="f")

    # temporal metadata: one entry per diagnostic time
    steps = root.create_group("Steps")
    steps.attrs.create("NSteps", data=numberOfTimes, dtype="i8")
    steps.create_dataset("Values", data=times[:numberOfTimes])

    for ilvl in range(max_nbr_level)[:]:

        print(f"Processing level {ilvl}")

        lvl = root.create_group(f"Level{ilvl}")
        lvl_spacing = root_spacing
        # level spacing halves per refinement level; trailing 0.0 for fake z
        lvl_spacing = [dl / 2**ilvl for dl in lvl_spacing] + [0.0]
        lvl.attrs.create("Spacing", lvl_spacing, dtype="f")
        steps_lvl = steps.create_group(f"Level{ilvl}")

        # accumulated over ALL times of this level, written once at the end
        AMRBox = []
        step_nbrBoxes = []
        AMRBoxOffsets = []
        dataOffsets = []

        cellData_g = lvl.create_group("CellData")
        pointData_g = lvl.create_group("PointData")
        fieldData_g = lvl.create_group("FieldData")
        cellDataOffset_g = steps_lvl.create_group("CellDataOffset")
        pointDataOffset_g = steps_lvl.create_group("PointDataOffset")
        FieldDataOffset_g = steps_lvl.create_group("FieldDataOffset")

        # True until the level's PointData dataset has been created
        first = True

        # cumulative counters for this level: they keep growing across
        # all time steps (they are NOT reset per time)
        current_size = 0
        amr_box_offset = 0

        for time in times[:numberOfTimes]:
            nbr_boxes = 0
            print(f"time {time}")
            time_str = f"{time:.10f}"
            phare_lvl_name = f"pl{ilvl}"
            # record where this step's data/boxes start in the flat datasets
            dataOffsets += [current_size]
            AMRBoxOffsets += [amr_box_offset]
            # pass this time if this level does not exist
            if phare_lvl_name not in phare_h5["t"][time_str].keys():
                print(f"no level {ilvl} at time {time}")
                continue
            phare_lvl = phare_h5["t"][time_str][phare_lvl_name]

            for patch_id in list(phare_lvl.keys())[:]:
                # print(f"patch {patch_id}")
                patch = phare_lvl[patch_id]

                if is_vector_data(patch):
                    # 3 datasets => vector diagnostic (B, E, bulk velocity)
                    x_name, y_name, z_name = list(patch.keys())
                    ph_x = patch[x_name][:]
                    ph_y = patch[y_name][:]
                    ph_z = patch[z_name][:]

                    box = boxFromPatch(patch)
                    AMRBox.append(box)
                    nbr_boxes += 1
                    npx, npy, npz = nbrNodes(box)
                    data = toFlatPrimal(ph_x, ph_y, ph_z, npx, npy, npz)

                    if first:
                        # this is the first patch of the first time
                        # for this level, we need to create the dataset
                        # the first dimension is the total # of points
                        # which is unknown hence the None for maxshape
                        pointData_b = pointData_g.create_dataset(
                            "data", data=data, maxshape=(None, 3)
                        )
                        first = False

                    else:
                        # dataset already created with shape (current_size,3)
                        # we add data.shape[0] points to the first dim
                        # hence need to resize the dataset.
                        pointData_b.resize(current_size + data.shape[0], axis=0)
                        pointData_b[current_size:, :] = data
                        # pass

                    current_size += data.shape[0]
                else:
                    # single dataset => scalar diagnostic (density)
                    assert len(patch.keys()) == 1
                    dataset_name = list(patch.keys())[0]
                    ph_data = patch[dataset_name][:]

                    box = boxFromPatch(patch)
                    AMRBox.append(box)
                    nbr_boxes += 1
                    npx, npy, npz = nbrNodes(box)
                    data = toFlatPrimal(ph_data, npx, npy, npz)

                    if first:
                        # this is the first patch of the first time
                        # for this level, we need to create the dataset
                        # the first dimension is the total # of points
                        # which is unknown hence the None for maxshape

                        # 1D resizable datasets maxshape MUST be (None,) tuple
                        pointData_b = pointData_g.create_dataset(
                            "data",
                            data=data,
                            maxshape=(None,),
                        )
                        first = False

                    else:
                        # dataset already created with shape (current_size,)
                        # we add data.shape[0] points to the first dim
                        # hence need to resize the dataset.
                        pointData_b.resize(current_size + data.shape[0], axis=0)
                        pointData_b[current_size:] = data
                        # pass

                    current_size += data.shape[0]

            # end of the patch loop at that time

            # number of boxes added for ilvl across all times
            amr_box_offset += nbr_boxes
            step_nbrBoxes += [nbr_boxes]

        lvl.create_dataset("AMRBox", data=AMRBox)
        steps_lvl.create_dataset("AMRBoxOffset", data=AMRBoxOffsets)
        steps_lvl.create_dataset("NumberOfAMRBox", data=step_nbrBoxes)
        pointDataOffset_g.create_dataset("data", data=dataOffsets)

    vtk.close()
| 318 | + |
| 319 | + |
if __name__ == "__main__":
    # script entry point: expects the PHARE diagnostic .h5 path as argv[1]
    main()
0 commit comments