Commit a53aaea

Move initializeRecPartition into constructor
1 parent 956bb55 commit a53aaea

7 files changed: 43 additions & 53 deletions
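In short, the DynamicPartitioner constructor now seeds an average partition itself through a new initializePartitionByAverage() member, so callers no longer build partition arrays and call initializeRecPartition() after construction; that setter survives under the name setRecPartition(). A caller-side sketch assembled from the unit-test changes below (the include path, the Cajita:: namespace, and the TEST_DEVICE placeholder are assumptions taken from the test sources, not shown in this commit):

    // Illustrative fragment only; include path, namespace, and device tag are
    // assumptions from the unit tests below, not part of this commit.
    #include <Cajita_DynamicPartitioner.hpp>

    #include <array>
    #include <mpi.h>

    void make_partitioner_example()
    {
        std::array<int, 3> global_num_cell = { 16, 32, 64 };

        // Before this commit, callers built rec_partitions by hand and then called
        //     partitioner.initializeRecPartition( rec_partitions[0],
        //                                         rec_partitions[1],
        //                                         rec_partitions[2] );
        // Now the constructor seeds that averaged partition itself via
        // initializePartitionByAverage().
        Cajita::DynamicPartitioner<TEST_DEVICE, 4> partitioner(
            MPI_COMM_WORLD, global_num_cell, 10 );

        // An explicit partition can still be imposed with the renamed setter:
        // partitioner.setRecPartition( part_i, part_j, part_k );
    }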

benchmark/cajita/Cajita_ParticleDynamicPartitionerPerformance.cpp

Lines changed: 2 additions & 2 deletions
@@ -146,8 +146,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
         for ( int t = 0; t < num_run; ++t )
         {
             // ensure every optimization process starts from the same status
-            partitioner.initializeRecPartition(
-                ave_partition[0], ave_partition[1], ave_partition[2] );
+            partitioner.initializePartitionByAverage( comm,
+                                                      global_num_cell );
 
             // compute local workload
             local_workload_timer.start( p );

benchmark/cajita/Cajita_SparseMapDynamicPartitionerPerformance.cpp

Lines changed: 2 additions & 2 deletions
@@ -164,8 +164,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
         for ( int t = 0; t < num_run; ++t )
         {
             // ensure every optimization process starts from the same status
-            partitioner.initializeRecPartition(
-                ave_partition[0], ave_partition[1], ave_partition[2] );
+            partitioner.initializePartitionByAverage( comm,
+                                                      global_num_cell );
 
             // compute local workload
             local_workload_timer.start( frac );

cajita/src/Cajita_DynamicPartitioner.hpp

Lines changed: 38 additions & 5 deletions
@@ -101,6 +101,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
         // compute the ranks_per_dim from MPI communicator
         allocate( global_cells_per_dim );
         ranksPerDimension( comm );
+        initializePartitionByAverage( comm, global_cells_per_dim );
     }
 
     /*!
@@ -126,6 +127,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
         int comm_size;
         MPI_Comm_size( comm, &comm_size );
         MPI_Dims_create( comm_size, num_space_dim, _ranks_per_dim.data() );
+        initializePartitionByAverage( comm, global_cells_per_dim );
     }
 
     /*!
@@ -267,18 +269,49 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
     }
 
     /*!
-      \brief Initialize the tile partition; partition in each dimension
+      \brief Initialize the tile partition by average size
+      \param comm The communicator to use for initializing partitioning
+      \param global_cells_per_dim 3D array, global cells in each dimension
+    */
+    void initializePartitionByAverage(
+        MPI_Comm comm,
+        const std::array<int, num_space_dim>& global_cells_per_dim )
+    {
+        std::array<int, 3> global_num_tile = {
+            global_cells_per_dim[0] / (int)cell_num_per_tile_dim,
+            global_cells_per_dim[1] / (int)cell_num_per_tile_dim,
+            global_cells_per_dim[2] / (int)cell_num_per_tile_dim };
+
+        auto ranks_per_dim = ranksPerDimension( comm, global_cells_per_dim );
+        std::array<std::vector<int>, 3> rec_partitions;
+        for ( int d = 0; d < 3; ++d )
+        {
+            int ele = global_num_tile[d] / ranks_per_dim[d];
+            int part = 0;
+            for ( int i = 0; i < ranks_per_dim[d]; ++i )
+            {
+                rec_partitions[d].push_back( part );
+                part += ele;
+            }
+            rec_partitions[d].push_back( global_num_tile[d] );
+        }
+
+        setRecPartition( rec_partitions[0], rec_partitions[1],
+                         rec_partitions[2] );
+    }
+
+    /*!
+      \brief Set the tile partition; partition in each dimension
       has the form [0, p_1, ..., p_n, total_tile_num], so the partition
       would be [0, p_1), [p_1, p_2) ... [p_n, total_tile_num]
       \param rec_partition_i partition array in dimension i
       \param rec_partition_j partition array in dimension j
       \param rec_partition_k partition array in dimension k
     */
-    void initializeRecPartition( std::vector<int>& rec_partition_i,
-                                 std::vector<int>& rec_partition_j,
-                                 std::vector<int>& rec_partition_k )
+    void setRecPartition( std::vector<int>& rec_partition_i,
+                          std::vector<int>& rec_partition_j,
+                          std::vector<int>& rec_partition_k )
     {
-
         int max_size = 0;
         for ( std::size_t d = 0; d < num_space_dim; ++d )
             max_size =
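For reference, the boundary construction inside the new initializePartitionByAverage() splits the global tile count evenly across the ranks in each dimension and closes the array with the total, so any remainder tiles land in the last rank. A minimal, self-contained sketch of that arithmetic for a single dimension (the 16-tile, 3-rank numbers are made up for illustration and are not part of this commit):

    #include <iostream>
    #include <vector>

    int main()
    {
        // Same boundary construction as initializePartitionByAverage, one dimension:
        // 16 global tiles split across 3 ranks.
        int global_num_tile = 16;
        int ranks = 3;

        std::vector<int> rec_partition;
        int ele = global_num_tile / ranks; // 5 tiles per rank (integer division)
        int part = 0;
        for ( int i = 0; i < ranks; ++i )
        {
            rec_partition.push_back( part );
            part += ele;
        }
        rec_partition.push_back( global_num_tile );

        // Prints 0 5 10 16: ranks own [0,5), [5,10), [10,16), so the last
        // rank absorbs the remainder tile.
        for ( int b : rec_partition )
            std::cout << b << " ";
        std::cout << "\n";
        return 0;
    }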

cajita/unit_test/tstGlobalGrid.hpp

Lines changed: 1 addition & 3 deletions
@@ -446,8 +446,6 @@ void sparseGridTest3d()
         }
         rec_partitions[d].push_back( global_num_tile[d] );
     }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // Create spares global grid
     auto global_grid = createGlobalGrid( MPI_COMM_WORLD, global_mesh,
@@ -562,7 +560,7 @@ void sparseGridTest3d()
     for ( int id = 1; id < ranks_per_dim[d]; id++ )
         part[d][id] += 1;
 
-    partitioner.initializeRecPartition( part[0], part[1], part[2] );
+    partitioner.setRecPartition( part[0], part[1], part[2] );
 
     std::array<int, 3> new_owned_num_cell;
     std::array<int, 3> new_global_cell_offset;

cajita/unit_test/tstParticleDynamicPartitioner.hpp

Lines changed: 0 additions & 17 deletions
@@ -177,23 +177,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
         MPI_Barrier( MPI_COMM_WORLD );
     }
 
-    // init partitions (average partition)
-    std::array<std::vector<int>, 3> rec_partitions;
-    for ( int d = 0; d < 3; ++d )
-    {
-        int ele = size_tile_per_dim / ranks_per_dim[d];
-        int part = 0;
-        for ( int i = 0; i < ranks_per_dim[d]; ++i )
-        {
-            rec_partitions[d].push_back( part );
-            part += ele;
-        }
-        rec_partitions[d].push_back( size_tile_per_dim );
-    }
-
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
-
     // basic settings for domain size and position
     double cell_size = 0.1;
     std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };

cajita/unit_test/tstSparseLocalGrid.hpp

Lines changed: 0 additions & 19 deletions
@@ -33,9 +33,6 @@ void sparseLocalGridTest( EntityType t2 )
     double cell_size = 0.23;
     std::array<int, 3> global_num_cell = { 16, 32, 64 };
     int cell_num_per_tile_dim = 4;
-    std::array<int, 3> global_num_tile = { 16 / cell_num_per_tile_dim,
-                                           32 / cell_num_per_tile_dim,
-                                           64 / cell_num_per_tile_dim };
     std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };
     std::array<double, 3> global_high_corner = {
         global_low_corner[0] + cell_size * global_num_cell[0],
@@ -48,22 +45,6 @@ void sparseLocalGridTest( EntityType t2 )
     std::array<bool, 3> periodic = { false, false, false };
     DynamicPartitioner<TEST_DEVICE, 4> partitioner( MPI_COMM_WORLD,
                                                     global_num_cell, 10 );
-    auto ranks_per_dim =
-        partitioner.ranksPerDimension( MPI_COMM_WORLD, global_num_cell );
-    std::array<std::vector<int>, 3> rec_partitions;
-    for ( int d = 0; d < 3; ++d )
-    {
-        int ele = global_num_tile[d] / ranks_per_dim[d];
-        int part = 0;
-        for ( int i = 0; i < ranks_per_dim[d]; ++i )
-        {
-            rec_partitions[d].push_back( part );
-            part += ele;
-        }
-        rec_partitions[d].push_back( global_num_tile[d] );
-    }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // Create global grid
     auto global_grid_ptr = Cajita::createGlobalGrid(

cajita/unit_test/tstSparseMapDynamicPartitioner.hpp

Lines changed: 0 additions & 5 deletions
@@ -74,8 +74,6 @@ void uniform_distribution_automatic_rank()
         }
         rec_partitions[d].push_back( size_tile_per_dim );
     }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // test getCurrentPartition function
     {
@@ -324,9 +322,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
         rec_partitions[d].push_back( size_tile_per_dim );
     }
 
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
-
     // basic settings for domain size and position
     double cell_size = 0.1;
     int pre_alloc_size = size_per_dim * size_per_dim;
