from __future__ import annotations

import sys
from typing import TYPE_CHECKING

import numpy as np
from mpi4py import MPI

if TYPE_CHECKING:
    from .partition import Partition
    from .spherical_partition import S2Partition

# a particle dataset: one equal-length array per field
ParticleDataT = dict[str, np.ndarray]


def distribute_dataset_by_home(
    partition: Partition | S2Partition,
    data: ParticleDataT,
    home_idx: np.ndarray,
    *,
    verbose: int = 0,
    verify_count: bool = True,
    all2all_iterations: int = 1,
) -> ParticleDataT:
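    """Redistribute particle data so that each particle ends up on the rank
    given by ``home_idx``.

    The exchange is optionally split into ``all2all_iterations`` chunks to
    bound the size of any single MPI message; the received chunks are
    concatenated per key before returning.
    """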
    total_to_send = len(home_idx)
    nperiteration = total_to_send // all2all_iterations
    data_new_list: list[ParticleDataT] = []

    # sanity check: every rank must carry the same keys with the same dtypes
    keys = list(data.keys())
    keys_0 = partition.comm.bcast(keys, root=0)
    assert len(keys) == len(keys_0)
    assert all(k in keys_0 for k in keys)
    dtype_string = "".join(data[k].dtype.char for k in keys_0)
    dtype_string_0 = partition.comm.bcast(dtype_string, root=0)
    assert dtype_string == dtype_string_0

    for i in range(all2all_iterations):
        start_idx = i * nperiteration
        # the last iteration also picks up the remainder of the integer division
        end_idx = (
            (i + 1) * nperiteration if i < all2all_iterations - 1 else total_to_send
        )
        if partition.rank == 0 and verbose > 0:
            print(f" - Distributing particles iteration {i + 1}/{all2all_iterations}")
        _data = {k: v[start_idx:end_idx] for k, v in data.items()}
        _home_idx = home_idx[start_idx:end_idx]
        _data = _distribute_dataset_by_home(
            partition,
            _data,
            _home_idx,
            keys_0,
            verbose=verbose,
            verify_count=verify_count,
        )
        data_new_list.append(_data)
    # concatenate the per-iteration results into one array per key
    data_new = {k: np.concatenate([d[k] for d in data_new_list]) for k in data.keys()}
    return data_new


def _distribute_dataset_by_home(
    partition: Partition | S2Partition,
    data: ParticleDataT,
    home_idx: np.ndarray,
    keys: list[str],
    *,
    verbose: int = 0,
    verify_count: bool = True,
) -> ParticleDataT:
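    """Exchange a single chunk of particles with one Alltoallv per array.

    ``keys`` must be identical, and identically ordered, on every rank so
    that the collective calls below stay matched across ranks.
    """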
    total_to_send = len(home_idx)
    for d in data.values():
        assert len(d) == total_to_send, "All data arrays must have the same length"

    # sort particles by destination rank so each rank's block is contiguous
    s = np.argsort(home_idx)
    home_idx = home_idx[s]

    # per-destination offsets and counts for the Alltoallv exchange
    send_displacements = np.searchsorted(home_idx, np.arange(partition.nranks))
    send_displacements = send_displacements.astype(np.int32)
    send_counts = np.append(send_displacements[1:], total_to_send) - send_displacements
    send_counts = send_counts.astype(np.int32)

    # announce to each rank how many objects will be sent
    recv_counts = np.empty_like(send_counts)
    partition.comm.Alltoall(send_counts, recv_counts)
    recv_displacements = np.insert(np.cumsum(recv_counts)[:-1], 0, 0)

    # number of objects that this rank will receive
    total_to_receive = np.sum(recv_counts)

    # debug output, printed rank by rank
    if verbose > 1:
        for i in range(partition.nranks):
            if partition.rank == i:
                print(f"Distribute Debug Rank {i}")
                print(f" - rank has {total_to_send} particles")
                print(f" - rank receives {total_to_receive} particles")
                print(f" - send_counts: {send_counts}")
                print(f" - send_displacements: {send_displacements}")
                print(f" - recv_counts: {recv_counts}")
                print(f" - recv_displacements: {recv_displacements}")
                print("", flush=True)
            partition.comm.Barrier()

    # allocate receive buffers; data is sent all-to-all, each array individually
    data_new = {k: np.empty(total_to_receive, dtype=data[k].dtype) for k in data.keys()}

    # iterating over `keys` (identical on all ranks) keeps the collectives matched
    for k in keys:
        d = data[k][s]
        s_msg = [d, (send_counts, send_displacements), d.dtype.char]
        r_msg = [data_new[k], (recv_counts, recv_displacements), d.dtype.char]
        partition.comm.Alltoallv(s_msg, r_msg)

    if verify_count:
        # check that the global particle count is conserved by the exchange
        key0 = keys[0]
        local_counts = np.array([len(data[key0]), len(data_new[key0])], dtype=np.int64)
        global_counts = np.empty_like(local_counts)
        partition.comm.Reduce(local_counts, global_counts, op=MPI.SUM, root=0)
        if partition.rank == 0 and global_counts[0] != global_counts[1]:
            print(
                "Error in distribute: particle count during distribute was not "
                f"maintained ({global_counts[0]} -> {global_counts[1]})",
                file=sys.stderr,
                flush=True,
            )
            partition.comm.Abort()

    return data_new
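

# Usage sketch (illustrative only; assumes a `partition` object from this
# package exposing `comm`, `rank`, and `nranks`, constructed elsewhere):
#
#     import numpy as np
#
#     rng = np.random.default_rng(42)
#     n_local = 1000
#     data = {"x": rng.random(n_local), "id": np.arange(n_local, dtype=np.int64)}
#     # destination rank of each particle, e.g. from a domain decomposition
#     home_idx = rng.integers(0, partition.nranks, size=n_local)
#     data = distribute_dataset_by_home(partition, data, home_idx, verbose=1)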