-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathmpi_particles.py
More file actions
64 lines (51 loc) · 2.11 KB
/
mpi_particles.py
File metadata and controls
64 lines (51 loc) · 2.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
from mpi4py import MPI
import numpy as np
import time
from lib_particles import Particle, compute_new_particle_position

comm = MPI.COMM_WORLD  # MPI communicator spanning all ranks
process = comm.Get_rank()  # index of this rank
n_processes = comm.Get_size()  # total number of ranks

# Per-rank RNG with a rank-dependent seed: ranks draw different initial
# positions while the whole run stays reproducible.
rng = np.random.RandomState(1235 + process)

# number of particles per process and time steps to simulate
n_particles_per_process = 3
n_particles = n_particles_per_process * n_processes
n_time_steps = 5

# Domain decomposition: rank `process` owns the half-open sub-interval
# [x_min, x_max) of the unit interval.
x_min = process / n_processes
x_max = (process + 1) / n_processes

# Initialize local particles uniformly inside this rank's sub-domain,
# assuming homogeneity across processes; particle indices are globally
# unique by construction.
local_particles = [
    Particle(n_particles_per_process * process + idx, rng.uniform(x_min, x_max))
    for idx in range(n_particles_per_process)
]
print(
    f"initial configuration ({process = }):",
    list(sorted(local_particles, key=lambda p: p.idx)),
)

# simulate dynamics for a couple of time steps
for time_step in range(n_time_steps):
    # propagate particle positions
    for particle in local_particles:
        particle.x = compute_new_particle_position(particle.x)
    # Communicate particle positions: each rank in turn acts as the
    # broadcast root and sends its `local_particles` to all other ranks,
    # so afterwards every rank holds the full particle list. The
    # broadcast is *blocking*, i.e., all ranks wait here until they have
    # received the data.
    received_particles = []
    for source_process in range(n_processes):
        received_particles += comm.bcast(local_particles, root=source_process)
    assert len(received_particles) == n_particles
    # After the communication is done, keep only the particles that now
    # fall inside this rank's sub-domain [x_min, x_max).
    # NOTE(review): a particle landing exactly at x == 1.0 would be owned
    # by no rank (the last interval is half-open) and the assert above
    # would fail on the next step -- this assumes
    # compute_new_particle_position keeps positions in [0, 1); confirm
    # against lib_particles.
    local_particles = [p for p in received_particles if x_min <= p.x < x_max]
    print()
    print(
        f"{time_step = } ({process = }):",
        list(sorted(local_particles, key=lambda p: p.idx)),
    )