@@ -112,6 +112,7 @@ void ComputationParallel::applyBCOnDirichletBoundary()
 
     if (partitioning_.ownPartitionContainsBottomBoundary())
     {
+        // shiftIBeginV and shiftIEndV handle the priority of the boundary conditions at the ends of the bottom boundary
         int shiftIBeginV = 0;
         int shiftIEndV = 0;
         if (!partitioning_.ownPartitionContainsLeftBoundary())
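The hunk above only shows the checks, not the values assigned to shiftIBeginV and shiftIEndV. The standalone sketch below illustrates one plausible reading of these offsets: if the subdomain has a left or right neighbour instead of a physical boundary, the bottom Dirichlet condition for v is also applied in the corresponding halo column, because no vertical Dirichlet boundary takes priority there. The function name, the nested std::vector field, the flag names, and the sign of the shifts are assumptions for illustration, not the project's code.

#include <vector>

// Hypothetical layout: i = 0 and i = nCellsX + 1 are halo columns, j = 0 is the bottom halo row.
void applyBottomBCForV(std::vector<std::vector<double>> &v,
                       bool ownLeftBoundary, bool ownRightBoundary,
                       double vBottom)
{
    const int nCellsX = static_cast<int>(v.size()) - 2;

    // one plausible reading of the shifts: with a neighbouring subdomain instead of a
    // physical left/right boundary, the bottom condition extends into the halo column
    int shiftIBegin = 0;
    int shiftIEnd = 0;
    if (!ownLeftBoundary)
        shiftIBegin = -1;   // extend one cell to the left, into the halo column
    if (!ownRightBoundary)
        shiftIEnd = 1;      // extend one cell to the right, into the halo column

    for (int i = 1 + shiftIBegin; i < nCellsX + 1 + shiftIEnd; i++)
        v[i][0] = vBottom;  // Dirichlet value of v on the bottom boundary
}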
@@ -126,6 +127,7 @@ void ComputationParallel::applyBCOnDirichletBoundary()
 
     if (partitioning_.ownPartitionContainsTopBoundary())
     {
+        // shiftIBeginV and shiftIEndV handle the priority of the boundary conditions at the ends of the top boundary
         int shiftIBeginV = 0;
         int shiftIEndV = 0;
         if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -196,6 +198,7 @@ void ComputationParallel::applyBCInHaloCellsAtDirichletBoundary()
 
     if (partitioning_.ownPartitionContainsBottomBoundary())
     {
+        // shiftIBeginU and shiftIEndU handle the priority of the boundary conditions at the ends of the bottom boundary
         int shiftIBeginU = 0;
         int shiftIEndU = 0;
         if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -213,6 +216,7 @@ void ComputationParallel::applyBCInHaloCellsAtDirichletBoundary()
 
     if (partitioning_.ownPartitionContainsTopBoundary())
     {
+        // shiftIBeginU and shiftIEndU handle the priority of the boundary conditions at the ends of the top boundary
         int shiftIBeginU = 0;
         int shiftIEndU = 0;
         if (!partitioning_.ownPartitionContainsLeftBoundary())
@@ -235,19 +239,21 @@ void ComputationParallel::receiveAndSendDiagonalPressureFromAndToOtherProcess()
 
     std::vector<double> diagonalPBuffer(1);
 
+    // if the current rank has a lower-left neighbour, its own p(1,1) value is sent to that neighbour
     if ((!partitioning_.ownPartitionContainsLeftBoundary()) && (!partitioning_.ownPartitionContainsBottomBoundary()))
     {
         diagonalPBuffer[0] = (*discretization_).p(1,1);
 
         MPI_Isend(diagonalPBuffer.data(), 1, MPI_DOUBLE, partitioning_.bottomNeighbourRankNo()-1, 0, MPI_COMM_WORLD, &diagonalRequest);
     }
 
+    // if the current rank has an upper-right neighbour, it receives that neighbour's p(1,1) value
     if ((!partitioning_.ownPartitionContainsRightBoundary()) && (!partitioning_.ownPartitionContainsTopBoundary()))
     {
         MPI_Irecv(diagonalPBuffer.data(), 1, MPI_DOUBLE, partitioning_.topNeighbourRankNo()+1, 0, MPI_COMM_WORLD, &diagonalRequest);
     }
 
-
+    // the received p value is then stored at p(N+1,N+1)
     if ((!partitioning_.ownPartitionContainsRightBoundary()) && (!partitioning_.ownPartitionContainsTopBoundary()))
     {
         MPI_Wait(&diagonalRequest, MPI_STATUS_IGNORE);
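The diagonal pressure exchange follows the usual non-blocking MPI pattern: pack a single value, post MPI_Isend/MPI_Irecv, and MPI_Wait before using the result. Below is a small self-contained sketch of that pattern for an assumed row-major 2 x 2 rank layout (run with 4 ranks); the neighbour ranks are computed directly from the rank index here, whereas the patch expresses the same idea via bottomNeighbourRankNo()-1 and topNeighbourRankNo()+1. Unlike the hunk above, the sketch also waits on the send request and uses separate send and receive buffers, which is the safer variant when a rank both sends and receives.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int myRank = 0, nRanks = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    MPI_Comm_size(MPI_COMM_WORLD, &nRanks);

    const int nRanksX = 2;                 // assumed row-major 2 x 2 decomposition
    const int nRanksY = nRanks / nRanksX;
    const int col = myRank % nRanksX;
    const int row = myRank / nRanksX;

    const bool hasLowerLeftNeighbour = (col > 0 && row > 0);
    const bool hasUpperRightNeighbour = (col < nRanksX - 1 && row < nRanksY - 1);

    double pCorner = 100.0 + myRank;       // stands in for p(1,1) of this subdomain
    std::vector<double> sendBuffer(1), recvBuffer(1);
    MPI_Request sendRequest, recvRequest;

    if (hasLowerLeftNeighbour)             // send the own corner value to the lower-left neighbour
    {
        sendBuffer[0] = pCorner;
        MPI_Isend(sendBuffer.data(), 1, MPI_DOUBLE, myRank - nRanksX - 1, 0, MPI_COMM_WORLD, &sendRequest);
    }
    if (hasUpperRightNeighbour)            // receive the upper-right neighbour's corner value
    {
        MPI_Irecv(recvBuffer.data(), 1, MPI_DOUBLE, myRank + nRanksX + 1, 0, MPI_COMM_WORLD, &recvRequest);
    }

    if (hasLowerLeftNeighbour)
        MPI_Wait(&sendRequest, MPI_STATUS_IGNORE);
    if (hasUpperRightNeighbour)
    {
        MPI_Wait(&recvRequest, MPI_STATUS_IGNORE);
        // in the real code this value would then be stored in the corner halo cell p(N+1,N+1)
        std::printf("rank %d received %f\n", myRank, recvBuffer[0]);
    }

    MPI_Finalize();
    return 0;
}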
@@ -264,6 +270,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
     MPI_Request upperRequest;
 
     std::vector<double> leftUVBuffer(2*nCellsY_);
+
+    // if the current rank does not contain the left Dirichlet boundary,
+    // then u and v need to be exchanged between the current rank and its left neighbour rank
     if (!partitioning_.ownPartitionContainsLeftBoundary())
     {
         for (int j = 1; j < nCellsY_+1; j++)
@@ -278,6 +287,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
     }
 
     std::vector<double> rightUVBuffer(2*nCellsY_);
+
+    // if the current rank does not contain the right Dirichlet boundary,
+    // then u and v need to be exchanged between the current rank and its right neighbour rank
     if (!partitioning_.ownPartitionContainsRightBoundary())
     {
         for (int j = 1; j < nCellsY_+1; j++)
@@ -292,6 +304,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
     }
 
     std::vector<double> lowerUVBuffer(2*nCellsX_);
+
+    // if the current rank does not contain the bottom Dirichlet boundary,
+    // then u and v need to be exchanged between the current rank and its lower neighbour rank
     if (!partitioning_.ownPartitionContainsBottomBoundary())
     {
         for (int i = 1; i < nCellsX_+1; i++)
@@ -306,6 +321,9 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
     }
 
     std::vector<double> upperUVBuffer(2*nCellsX_);
+
+    // if the current rank does not contain the top Dirichlet boundary,
+    // then u and v need to be exchanged between the current rank and its upper neighbour rank
     if (!partitioning_.ownPartitionContainsTopBoundary())
     {
         for (int i = 1; i < nCellsX_+1; i++)
@@ -319,6 +337,8 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
         MPI_Irecv(upperUVBuffer.data(), 2*nCellsX_, MPI_DOUBLE, partitioning_.topNeighbourRankNo(), 0, MPI_COMM_WORLD, &upperRequest);
     }
 
+    // the received values are now written into the u and v entries along the subdomain boundaries
+
     if (!partitioning_.ownPartitionContainsLeftBoundary())
     {
         MPI_Wait(&leftRequest, MPI_STATUSES_IGNORE);
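For each of the four exchanges above, u and v are packed together into one buffer of length 2*nCellsY_ (or 2*nCellsX_), sent with MPI_Isend, and unpacked again after MPI_Wait. The helpers below sketch one possible buffer layout (first all u values, then all v values), with a nested std::vector standing in for the field class; the columns that are actually packed and the halo entries they are written to are not visible in these hunks, so the indices and names here are only illustrative.

#include <vector>

// pack the leftmost interior column of u and v into one contiguous buffer of length 2*nCellsY
std::vector<double> packLeftColumn(const std::vector<std::vector<double>> &u,
                                   const std::vector<std::vector<double>> &v,
                                   int nCellsY)
{
    std::vector<double> buffer(2 * nCellsY);
    for (int j = 1; j < nCellsY + 1; j++)
    {
        buffer[j - 1] = u[1][j];              // u part: first nCellsY entries
        buffer[nCellsY + j - 1] = v[1][j];    // v part: last nCellsY entries
    }
    return buffer;
}

// the receiving neighbour unpacks the buffer into its right halo column
void unpackIntoRightHalo(std::vector<std::vector<double>> &u,
                         std::vector<std::vector<double>> &v,
                         const std::vector<double> &buffer,
                         int nCellsX, int nCellsY)
{
    for (int j = 1; j < nCellsY + 1; j++)
    {
        u[nCellsX + 1][j] = buffer[j - 1];
        v[nCellsX + 1][j] = buffer[nCellsY + j - 1];
    }
}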
@@ -359,13 +379,14 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
         }
     }
 
-
+    // diagonal communication of single velocity values with the diagonal neighbours
     MPI_Request leftUpperDiagonalRequest;
     MPI_Request rightLowerDiagonalRequest;
 
     std::vector<double> leftUpperDiagonalBuffer(1);
     std::vector<double> rightLowerDiagonalBuffer(1);
 
+    // if the current rank has an upper-left neighbour, v(1,nCellsY_) needs to be communicated because it is needed in the donor cell scheme
     if (!partitioning_.ownPartitionContainsLeftBoundary() && !partitioning_.ownPartitionContainsTopBoundary())
     {
         leftUpperDiagonalBuffer[0] = (*discretization_).v(1,nCellsY_);
@@ -375,6 +396,7 @@ void ComputationParallel::receiveAndSendVelocitiesFromAndToOtherProcesses()
         MPI_Irecv(leftUpperDiagonalBuffer.data(), 1, MPI_DOUBLE, partitioning_.topNeighbourRankNo()-1, 0, MPI_COMM_WORLD, &leftUpperDiagonalRequest);
     }
 
+    // if the current rank has a lower-right neighbour, u(nCellsX_,1) needs to be communicated because it is needed in the donor cell scheme
     if (!partitioning_.ownPartitionContainsRightBoundary() && !partitioning_.ownPartitionContainsBottomBoundary())
     {
         rightLowerDiagonalBuffer[0] = (*discretization_).u(nCellsX_,1);
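The diagonal values are needed because the donor cell discretization of the convective terms reaches one cell into the diagonal neighbour at the subdomain corners. As an illustration, the usual staggered-grid donor cell approximation of d(uv)/dy at the u(i,j) position is sketched below: evaluated at i = nCellsX_ and j = 1 it uses v(i+1,j-1) = v(nCellsX_+1,0), which lives on the lower-right diagonal neighbour. The function signature and the exact form used by discretization_ are assumptions; only the role of the diagonal value is the point here.

#include <cmath>

// donor cell approximation of d(uv)/dy at the u(i,j) position; gamma is the donor cell weighting factor
double computeDuvDy(double dy, double gamma,
                    double u_ij, double u_ijp1, double u_ijm1,
                    double v_ij, double v_ip1j, double v_ijm1, double v_ip1jm1)
{
    const double vTop = 0.5 * (v_ij + v_ip1j);        // v interpolated to the upper u edge
    const double vBottom = 0.5 * (v_ijm1 + v_ip1jm1); // lower u edge; v_ip1jm1 is the diagonal value

    const double central = (vTop * 0.5 * (u_ij + u_ijp1) - vBottom * 0.5 * (u_ijm1 + u_ij)) / dy;
    const double donor = (std::fabs(vTop) * 0.5 * (u_ij - u_ijp1) - std::fabs(vBottom) * 0.5 * (u_ijm1 - u_ij)) / dy;

    return central + gamma * donor;
}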
@@ -461,6 +483,7 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
     std::vector<double> sendRightFBuffer(nCellsY_);
     std::vector<double> sendUpperGBuffer(nCellsX_);
 
+    // if the current rank does not have a right Dirichlet boundary, F on the local right boundary is sent to the right neighbour
     if (!partitioning_.ownPartitionContainsRightBoundary())
     {
         for (int j = 1; j < nCellsY_+1; j++)
@@ -469,6 +492,7 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
         MPI_Isend(sendRightFBuffer.data(), nCellsY_, MPI_DOUBLE, partitioning_.rightNeighbourRankNo(), 0, MPI_COMM_WORLD, &rightRequest);
     }
 
+    // if the current rank does not have a top Dirichlet boundary, G on the local top boundary is sent to the upper neighbour
     if (!partitioning_.ownPartitionContainsTopBoundary())
     {
         for (int i = 1; i < nCellsX_+1; i++)
@@ -477,11 +501,13 @@ void ComputationParallel::receiveAndSendPreliminaryVelocitiesFromAndToOtherProce
         MPI_Isend(sendUpperGBuffer.data(), nCellsX_, MPI_DOUBLE, partitioning_.topNeighbourRankNo(), 0, MPI_COMM_WORLD, &upperRequest);
     }
 
+    // if the current rank does not have a left Dirichlet boundary, F on the local left boundary is received from the left neighbour
     if (!partitioning_.ownPartitionContainsLeftBoundary())
     {
         MPI_Irecv(receiveLeftFBuffer.data(), nCellsY_, MPI_DOUBLE, partitioning_.leftNeighbourRankNo(), 0, MPI_COMM_WORLD, &leftRequest);
     }
 
+    // if the current rank does not have a bottom Dirichlet boundary, G on the local bottom boundary is received from the bottom neighbour
     if (!partitioning_.ownPartitionContainsBottomBoundary())
     {
         MPI_Irecv(receiveLowerGBuffer.data(), nCellsX_, MPI_DOUBLE, partitioning_.bottomNeighbourRankNo(), 0, MPI_COMM_WORLD, &lowerRequest);
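F and G have to be exchanged before the pressure solve because the right-hand side of the pressure Poisson equation in cell (i,j) uses F(i-1,j) and G(i,j-1); for i = 1 and j = 1 these are exactly the values that arrive in receiveLeftFBuffer and receiveLowerGBuffer. The sketch below shows that right-hand side computation; the nested std::vector layout and the function name are illustrative assumptions, not the project's interfaces.

#include <vector>

// rhs(i,j) = 1/dt * ( (F(i,j) - F(i-1,j))/dx + (G(i,j) - G(i,j-1))/dy )
void computeRHS(std::vector<std::vector<double>> &rhs,
                const std::vector<std::vector<double>> &F,
                const std::vector<std::vector<double>> &G,
                int nCellsX, int nCellsY, double dx, double dy, double dt)
{
    for (int i = 1; i < nCellsX + 1; i++)
        for (int j = 1; j < nCellsY + 1; j++)
            rhs[i][j] = ((F[i][j] - F[i - 1][j]) / dx      // F[0][j] comes from the left neighbour
                       + (G[i][j] - G[i][j - 1]) / dy)     // G[i][0] comes from the bottom neighbour
                       / dt;
}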
@@ -522,6 +548,7 @@ void ComputationParallel::computePressure()
 
 void ComputationParallel::computeVelocities()
 {
+    // the shift parameters determine whether the new velocities u and v also need to be computed at the local boundary of the current rank
     int shiftIEndU = 0;
     int shiftJEndV = 0;
     if (!partitioning_.ownPartitionContainsRightBoundary())
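The values assigned to shiftIEndU and shiftJEndV are not visible in this hunk. The sketch below shows the intended effect under the assumption that each shift extends the update loop by one column or row when the corresponding physical boundary belongs to a neighbour instead: in that case the last u column (or v row) is not a Dirichlet value and has to be updated from F and p (or G and p) as well. The update formulas are the standard ones; field layout and function name are assumptions.

#include <vector>

// u(i,j) = F(i,j) - dt/dx * (p(i+1,j) - p(i,j)), v(i,j) = G(i,j) - dt/dy * (p(i,j+1) - p(i,j))
void computeVelocitiesSketch(std::vector<std::vector<double>> &u,
                             std::vector<std::vector<double>> &v,
                             const std::vector<std::vector<double>> &F,
                             const std::vector<std::vector<double>> &G,
                             const std::vector<std::vector<double>> &p,
                             int nCellsX, int nCellsY, double dx, double dy, double dt,
                             bool ownRightBoundary, bool ownTopBoundary)
{
    // with a right/top neighbour the last u column / v row is an ordinary interior value
    int shiftIEndU = ownRightBoundary ? 0 : 1;
    int shiftJEndV = ownTopBoundary ? 0 : 1;

    for (int i = 1; i < nCellsX + shiftIEndU; i++)
        for (int j = 1; j < nCellsY + 1; j++)
            u[i][j] = F[i][j] - dt / dx * (p[i + 1][j] - p[i][j]);

    for (int i = 1; i < nCellsX + 1; i++)
        for (int j = 1; j < nCellsY + shiftJEndV; j++)
            v[i][j] = G[i][j] - dt / dy * (p[i][j + 1] - p[i][j]);
}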