
Commit 711a322

Merge pull request #34 from motrieu/feature/finalComments/nina
add comments
2 parents 61303f2 + e13d5c3 commit 711a322


4 files changed, +82 -4 lines changed

src/computation/computationParallel.cpp

Lines changed: 29 additions & 2 deletions
@@ -112,6 +112,7 @@ void ComputationParallel::applyBCOnDirichletBoundary()

    if (partitioning_.ownPartitionContainsBottomBoundary())
    {
+        // shiftIBeginV and shiftIEndV handle priority of boundary conditions
        int shiftIBeginV = 0;
        int shiftIEndV = 0;
        if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -126,6 +127,7 @@ void ComputationParallel::applyBCOnDirichletBoundary()

    if (partitioning_.ownPartitionContainsTopBoundary())
    {
+        // shiftIBeginV and shiftIEndV handle priority of boundary conditions
        int shiftIBeginV = 0;
        int shiftIEndV = 0;
        if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -196,6 +198,7 @@ void ComputationParallel::applyBCInHaloCellsAtDirichletBoundary()

    if (partitioning_.ownPartitionContainsBottomBoundary())
    {
+        // shiftIBeginU and shiftIEndU handle priority of boundary conditions
        int shiftIBeginU = 0;
        int shiftIEndU = 0;
        if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -213,6 +216,7 @@ void ComputationParallel::applyBCInHaloCellsAtDirichletBoundary()

    if (partitioning_.ownPartitionContainsTopBoundary())
    {
+        // shiftIBeginU and shiftIEndU handle priority of boundary conditions
        int shiftIBeginU = 0;
        int shiftIEndU = 0;
        if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -235,19 +239,21 @@ void ComputationParallel::receiveAndSendDiagonalPressureFromAndToOtherProcess()

    std::vector<double> diagonalPBuffer(1);

+    // if the current rank has a lower-left neighbour, the own p(1,1)-value is sent to that neighbour
    if ((!partitioning_.ownPartitionContainsLeftBoundary()) && (!partitioning_.ownPartitionContainsBottomBoundary()))
    {
        diagonalPBuffer[0] = (*discretization_).p(1,1);

        MPI_Isend(diagonalPBuffer.data(), 1, MPI_DOUBLE, partitioning_.bottomNeighbourRankNo()-1, 0, MPI_COMM_WORLD, &diagonalRequest);
    }

+    // if the current rank has an upper-right neighbour, the current rank receives the p(1,1)-value from that neighbour
    if ((!partitioning_.ownPartitionContainsRightBoundary()) && (!partitioning_.ownPartitionContainsTopBoundary()))
    {
        MPI_Irecv(diagonalPBuffer.data(), 1, MPI_DOUBLE, partitioning_.topNeighbourRankNo()+1, 0, MPI_COMM_WORLD, &diagonalRequest);
    }

-
+    // the received p-value is then stored at p(N+1,N+1)
    if ((!partitioning_.ownPartitionContainsRightBoundary()) && (!partitioning_.ownPartitionContainsTopBoundary()))
    {
        MPI_Wait(&diagonalRequest, MPI_STATUS_IGNORE);
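Note: the hunk ends at the MPI_Wait call; the new comment states that the received value is stored in the upper-right pressure halo cell. A minimal sketch of that continuation, assuming the halo cell is addressed as p(nCellsX_+1, nCellsY_+1) (the exact index expression is not visible in this diff):

    // wait for the upper-right neighbour's p(1,1), then store it in the diagonal halo cell
    if ((!partitioning_.ownPartitionContainsRightBoundary()) && (!partitioning_.ownPartitionContainsTopBoundary()))
    {
        MPI_Wait(&diagonalRequest, MPI_STATUS_IGNORE);
        (*discretization_).p(nCellsX_+1, nCellsY_+1) = diagonalPBuffer[0];  // assumed index, taken from the "p(N+1,N+1)" comment
    }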
@@ -264,6 +270,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
    MPI_Request upperRequest;

    std::vector<double> leftUVBuffer(2*nCellsY_);
+
+    // if current rank does not contain left Dirichlet boundary,
+    // then u and v need to be communicated between the current rank and its left neighbour rank
    if (!partitioning_.ownPartitionContainsLeftBoundary())
    {
        for (int j = 1; j < nCellsY_+1; j++)
@@ -278,6 +287,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
    }

    std::vector<double> rightUVBuffer(2*nCellsY_);
+
+    // if current rank does not contain right Dirichlet boundary,
+    // then u and v need to be communicated between the current rank and its right neighbour rank
    if (!partitioning_.ownPartitionContainsRightBoundary())
    {
        for (int j = 1; j < nCellsY_+1; j++)
@@ -292,6 +304,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
    }

    std::vector<double> lowerUVBuffer(2*nCellsX_);
+
+    // if current rank does not contain bottom Dirichlet boundary,
+    // then u and v need to be communicated between the current rank and its lower neighbour rank
    if (!partitioning_.ownPartitionContainsBottomBoundary())
    {
        for (int i = 1; i < nCellsX_+1; i++)
@@ -306,6 +321,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
    }

    std::vector<double> upperUVBuffer(2*nCellsX_);
+
+    // if current rank does not contain top Dirichlet boundary,
+    // then u and v need to be communicated between the current rank and its upper neighbour rank
    if (!partitioning_.ownPartitionContainsTopBoundary())
    {
        for (int i = 1; i < nCellsX_+1; i++)
@@ -319,6 +337,8 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
        MPI_Irecv(upperUVBuffer.data(), 2*nCellsX_, MPI_DOUBLE, partitioning_.topNeighbourRankNo(), 0, MPI_COMM_WORLD, &upperRequest);
    }

+    // received values are now written to the right place
+
    if (!partitioning_.ownPartitionContainsLeftBoundary())
    {
        MPI_Wait(&leftRequest, MPI_STATUSES_IGNORE);
@@ -359,13 +379,14 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
        }
    }

-
+    // diagonal communication
    MPI_Request leftUpperDiagonalRequest;
    MPI_Request rightLowerDiagonalRequest;

    std::vector<double> leftUpperDiagonalBuffer(1);
    std::vector<double> rightLowerDiagonalBuffer(1);

+    // if current rank has an upper-left neighbour, v(1,nCellsY_) needs to be communicated since it is needed in the donor cell scheme
    if (!partitioning_.ownPartitionContainsLeftBoundary() && !partitioning_.ownPartitionContainsTopBoundary())
    {
        leftUpperDiagonalBuffer[0] = (*discretization_).v(1,nCellsY_);
@@ -375,6 +396,7 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
        MPI_Irecv(leftUpperDiagonalBuffer.data(), 1, MPI_DOUBLE, partitioning_.topNeighbourRankNo()-1, 0, MPI_COMM_WORLD, &leftUpperDiagonalRequest);
    }

+    // if current rank has a lower-right neighbour, u(nCellsX_,1) needs to be communicated since it is needed in the donor cell scheme
    if (!partitioning_.ownPartitionContainsRightBoundary() && !partitioning_.ownPartitionContainsBottomBoundary())
    {
        rightLowerDiagonalBuffer[0] = (*discretization_).u(nCellsX_,1);
@@ -461,6 +483,7 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
    std::vector<double> sendRightFBuffer(nCellsY_);
    std::vector<double> sendUpperGBuffer(nCellsX_);

+    // if current rank does not have a right Dirichlet boundary, then F on the local right boundary is sent to the right neighbour
    if (!partitioning_.ownPartitionContainsRightBoundary())
    {
        for (int j = 1; j < nCellsY_+1; j++)
@@ -469,6 +492,7 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
        MPI_Isend(sendRightFBuffer.data(), nCellsY_, MPI_DOUBLE, partitioning_.rightNeighbourRankNo(), 0, MPI_COMM_WORLD, &rightRequest);
    }

+    // if current rank does not have a top Dirichlet boundary, then G on the local top boundary is sent to the upper neighbour
    if (!partitioning_.ownPartitionContainsTopBoundary())
    {
        for (int i = 1; i < nCellsX_+1; i++)
@@ -477,11 +501,13 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
        MPI_Isend(sendUpperGBuffer.data(), nCellsX_, MPI_DOUBLE, partitioning_.topNeighbourRankNo(), 0, MPI_COMM_WORLD, &upperRequest);
    }

+    // if current rank does not have a left Dirichlet boundary, then F on the local left boundary is received from the left neighbour
    if (!partitioning_.ownPartitionContainsLeftBoundary())
    {
        MPI_Irecv(receiveLeftFBuffer.data(), nCellsY_, MPI_DOUBLE, partitioning_.leftNeighbourRankNo(), 0, MPI_COMM_WORLD, &leftRequest);
    }

+    // if current rank does not have a bottom Dirichlet boundary, then G on the local bottom boundary is received from the bottom neighbour
    if (!partitioning_.ownPartitionContainsBottomBoundary())
    {
        MPI_Irecv(receiveLowerGBuffer.data(), nCellsX_, MPI_DOUBLE, partitioning_.bottomNeighbourRankNo(), 0, MPI_COMM_WORLD, &lowerRequest);
@@ -522,6 +548,7 @@ void ComputationParallel::computePressure()

void ComputationParallel::computeVelocities()
{
+    // shift parameters determine whether new velocities u and v also need to be calculated at local boundary of the current rank
    int shiftIEndU = 0;
    int shiftJEndV = 0;
    if (!partitioning_.ownPartitionContainsRightBoundary())
src/computation/computationParallel.h

Lines changed: 48 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,30 +11,76 @@
1111
class ComputationParallel : public Computation
1212
{
1313
public:
14+
/// @brief solves the Navier-Stokes equations by partitioning the domain and therefore calculating parallelly
15+
/// the algorithm loops over time and calculates for each time step the updated velocities u and v, the
16+
/// pressure p based on the checker board scheme, the preliminary velocities F and G and the right hand side of the pressure Poisson problem (rhs)
1417
void runSimulation();
18+
19+
/// @brief initializes all objects needed in order to solve the Navier-Stokes equations parallelly
20+
/// reads in the parameters from the given parameter file
21+
/// decides based on parameter file which discretization scheme to choose
22+
/// @param argc number of arguments parsed in the command line, must be equal to 2
23+
/// @param argv the arguments parsed in the command line, contains the file name of the parameter file
1524
void initialize(int argc, char *argv[]);
1625

1726
private:
27+
/// @brief checks if current rank contains Dirichlet boundary
28+
/// sets boundary conditions of u and v on the local boundary based on given Dirichlet conditions
29+
/// handles priority of boundary conditions by introducing shifting parameters for v
1830
void applyBCOnDirichletBoundary();
31+
32+
/// @brief checks if current rank contains Dirichlet boundary
33+
/// sets boundary conditions of f and g on the local boundary based on given Dirichlet conditions
1934
void applyPreliminaryBCOnDirichletBoundary();
35+
36+
/// @brief checks if current rank contains Dirichlet boundary
37+
/// sets boundary conditions of u and v in halo cells based on given Dirichlet conditions and inner cell values
38+
/// handles priority of boundary conditions by introducing shifting parameters for u
2039
void applyBCInHaloCellsAtDirichletBoundary();
2140

22-
//for output
41+
/// @brief diagonal pressure communication for interpolate function and output
42+
/// if the current rank has a lower-left neighbour, the own p(1,1)-value is sent to that neighbour
43+
/// if the current rank has an upper-right neighbour, the current rank receives the p(1,1)-value from that neighbour
2344
void receiveAndSendDiagonalPressureFromAndToOtherProcess();
45+
46+
/// @brief u and v communication
47+
/// checks if current rank contains Dirichlet boundary
48+
/// if current rank does not contain a certain Dirichlet boundary, then the calculated u and v need to be communicated with the corresponding neighbour
49+
/// function also computes diagonal communication of u and v needed for donor cell scheme
2450
void receiveAndSendVelocitiesFromAndToOtherProcesses();
51+
52+
/// @brief computes common time step width for all processes by using serial computeTimeStepWidth()-function
53+
/// finds the minimum of all possible time step widths via MPI-function MPI_Allreduce and the operation MPI_MIN
2554
void computeTimeStepWidthParallel();
55+
56+
/// @brief computes preliminary velocities F and G
57+
/// if current rank does not have a right Dirichlet boundary, then F is also calculated on the right boundary of that rank
58+
/// if current rank does not have a top Dirichlet boundary, then G is also calculated on the upper boundary of that rank
2659
void computePreliminaryVelocities();
60+
61+
/// @brief F and G communication
62+
/// checks if current rank contains Dirichlet boundaries
63+
/// if current rank does not contain Dirichlet boundary, then F or G need to be sent or received from one of its neighbours
2764
void receiveAndSendPreliminaryVelocitiesFromAndToOtherProcesses();
65+
66+
/// @brief computes pressure by using the selected parallel pressure solver SOR-algorithm with checker board pattern
2867
void computePressure();
68+
69+
/// @brief computes new velocities u and v
70+
/// depending on whether current rank contains Dirichlet boundary or not, the new velocities u and v are also computed on the local boundary of that rank
71+
/// this is handled by parameters called shiftIEndU and shiftJEndV
2972
void computeVelocities();
3073

31-
74+
/// @brief shared pointer to the parallel pressure solver (SOR)
3275
std::unique_ptr<PressureSolverParallel> pressureSolverParallel_;
3376

77+
/// @brief unique pointer to the parallel output writer which produces vtk-output
3478
std::unique_ptr<OutputWriterParaviewParallel> outputWriterParaviewParallel_;
3579

80+
/// @brief unique pointer to the parallel output writer which produces readable txt-files
3681
std::unique_ptr<OutputWriterTextParallel> outputWriterTextParallel_;
3782

83+
/// @brief contains information about the partition of the physical domain needed for MPI
3884
Partitioning partitioning_;
3985

4086
/// @brief number of elements in x direction for this process (halo cells not included)

src/pressure_solver/pressureSolverParallel.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,12 +32,15 @@ void PressureSolverParallel::setDiagonalBoundaryValuesOnDirichletParallelForOutp
3232
// handles upper-left corner
3333
if (partitioning_.ownPartitionContainsLeftBoundary())
3434
(*discretization_).p(pIBegin_-1,pJEnd_) = (*discretization_).p(pIBegin_,pJEnd_);
35+
3536
// handles lower-left corner
3637
if ((partitioning_.ownPartitionContainsLeftBoundary()) && (partitioning_.ownPartitionContainsBottomBoundary()))
3738
(*discretization_).p(pIBegin_-1,pJBegin_-1) = (*discretization_).p(pIBegin_,pJBegin_);
39+
3840
// handles lower-right corner
3941
if (partitioning_.ownPartitionContainsBottomBoundary())
4042
(*discretization_).p(pIEnd_,pJBegin_-1) = (*discretization_).p(pIEnd_,pJBegin_);
43+
4144
// handles upper-right corner
4245
if (partitioning_.ownPartitionContainsRightBoundary())
4346
(*discretization_).p(pIEnd_,pJEnd_) = (*discretization_).p(pIEnd_-1,pJEnd_);

src/pressure_solver/pressureSolverParallel.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ class PressureSolverParallel : public PressureSolver
1515
/// @param partitioning contains information about the partition of the physical domain needed for MPI
1616
PressureSolverParallel(std::shared_ptr<Discretization> discretization, double epsilon, int maximumNumberOfIterations, Partitioning partitioning);
1717

18+
/// @brief handles pressure values in the corner halo cells when Dirichlet boundary exists (needs to be done for output)
1819
void setDiagonalBoundaryValuesOnDirichletParallelForOutput();
1920

2021
protected:
@@ -103,5 +104,6 @@ class PressureSolverParallel : public PressureSolver
103104
/// @brief number of elements in y direction for this process (halo cells not included)
104105
int nCellsY_;
105106

107+
/// @brief number of global cells
106108
double numberOfValuesGlobal_;
107109
};
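numberOfValuesGlobal_ is most likely used to normalise the global residual of the pressure Poisson equation before the convergence check. A sketch under that assumption; computeLocalResidualSquaredSum() and epsilon_ are hypothetical names, not taken from this diff:

    // sum the squared local residuals, add them up over all ranks,
    // and normalise by the global number of cells before comparing against the tolerance
    double localResidualSquaredSum = computeLocalResidualSquaredSum();   // hypothetical helper
    double globalResidualSquaredSum = 0.0;
    MPI_Allreduce(&localResidualSquaredSum, &globalResidualSquaredSum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    const bool converged = (globalResidualSquaredSum / numberOfValuesGlobal_) < epsilon_ * epsilon_;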

0 commit comments
