@@ -59,7 +59,7 @@ static inline int mca_coll_acoll_reduce_xpmem_h(const void *sbuf, void *rbuf, si
     int size;
     size_t total_dsize, dsize;

-    coll_acoll_init(module, comm, subc->data, subc);
+    coll_acoll_init(module, comm, subc->data, subc, 0);
     coll_acoll_data_t *data = subc->data;
     if (NULL == data) {
         return -1;
@@ -82,15 +82,15 @@ static inline int mca_coll_acoll_reduce_xpmem_h(const void *sbuf, void *rbuf, si
     if (!subc->xpmem_use_sr_buf) {
         tmp_rbuf = (char *) data->scratch;
         tmp_sbuf = (char *) data->scratch + (subc->xpmem_buf_size) / 2;
-        if ((sbuf == MPI_IN_PLACE)) {
+        if ((MPI_IN_PLACE == sbuf)) {
             memcpy(tmp_sbuf, rbuf, total_dsize);
         } else {
             memcpy(tmp_sbuf, sbuf, total_dsize);
         }
     } else {
         tmp_sbuf = (char *) sbuf;
         tmp_rbuf = (char *) rbuf;
-        if (sbuf == MPI_IN_PLACE) {
+        if (MPI_IN_PLACE == sbuf) {
             tmp_sbuf = (char *) rbuf;
         }
     }
@@ -153,7 +153,7 @@ static inline int mca_coll_acoll_reduce_xpmem_h(const void *sbuf, void *rbuf, si

         my_count_size = (l2_local_rank == (local_size - 1)) ? chunk + (count % local_size) : chunk;

-        if (l2_local_rank == 0) {
+        if (0 == l2_local_rank) {
             for (int i = 1; i < local_size; i++) {
                 ompi_op_reduce(op, (char *) data->xpmem_raddr[l2_gp[i]], (char *) tmp_rbuf,
                                my_count_size, dtype);
@@ -192,7 +192,7 @@ static inline int mca_coll_acoll_allreduce_xpmem_f(const void *sbuf, void *rbuf,
     int size;
     size_t total_dsize, dsize;

-    coll_acoll_init(module, comm, subc->data, subc);
+    coll_acoll_init(module, comm, subc->data, subc, 0);
     coll_acoll_data_t *data = subc->data;
     if (NULL == data) {
         return -1;
@@ -207,15 +207,15 @@ static inline int mca_coll_acoll_allreduce_xpmem_f(const void *sbuf, void *rbuf,
     if (!subc->xpmem_use_sr_buf) {
         tmp_rbuf = (char *) data->scratch;
         tmp_sbuf = (char *) data->scratch + (subc->xpmem_buf_size) / 2;
-        if ((sbuf == MPI_IN_PLACE)) {
+        if ((MPI_IN_PLACE == sbuf)) {
             memcpy(tmp_sbuf, rbuf, total_dsize);
         } else {
             memcpy(tmp_sbuf, sbuf, total_dsize);
         }
     } else {
         tmp_sbuf = (char *) sbuf;
         tmp_rbuf = (char *) rbuf;
-        if (sbuf == MPI_IN_PLACE) {
+        if (MPI_IN_PLACE == sbuf) {
             tmp_sbuf = (char *) rbuf;
         }
     }
@@ -242,7 +242,7 @@ static inline int mca_coll_acoll_allreduce_xpmem_f(const void *sbuf, void *rbuf,

     size_t chunk = count / size;
     size_t my_count_size = (rank == (size - 1)) ? (count / size) + count % size : count / size;
-    if (rank == 0) {
+    if (0 == rank) {
         if (sbuf != MPI_IN_PLACE)
             memcpy(tmp_rbuf, sbuf, my_count_size * dsize);
     } else {
@@ -299,7 +299,7 @@ void mca_coll_acoll_sync(coll_acoll_data_t *data, int offset, int *group, int gp
     opal_atomic_wmb();

     int val;
-    if (up == 1) {
+    if (1 == up) {
         val = data->sync[0];
     } else {
         val = data->sync[1];
@@ -346,7 +346,7 @@ void mca_coll_acoll_sync(coll_acoll_data_t *data, int offset, int *group, int gp
                              __ATOMIC_RELAXED);
         }
     }
-    if (up == 1) {
+    if (1 == up) {
         data->sync[0] = val;
     } else {
         data->sync[1] = val;
@@ -361,8 +361,7 @@ int mca_coll_acoll_allreduce_small_msgs_h(const void *sbuf, void *rbuf, size_t c
 {
     size_t dsize;
     int err = MPI_SUCCESS;
-
-    coll_acoll_init(module, comm, subc->data, subc);
+    coll_acoll_init(module, comm, subc->data, subc, 0);
     coll_acoll_data_t *data = subc->data;
     if (NULL == data) {
         return -1;
@@ -434,7 +433,7 @@ int mca_coll_acoll_allreduce_small_msgs_h(const void *sbuf, void *rbuf, size_t c
     }

     if (intra && (ompi_comm_size(subc->numa_comm) > 1)) {
-        err = mca_coll_acoll_bcast(rbuf, count, dtype, 0, subc->numa_comm, module);
+        err = ompi_coll_base_bcast_intra_basic_linear(rbuf, count, dtype, 0, subc->numa_comm, module);
     }
     return err;
 }
@@ -451,7 +450,7 @@ int mca_coll_acoll_allreduce_intra(const void *sbuf, void *rbuf, size_t count,
     ompi_datatype_type_size(dtype, &dsize);
     total_dsize = dsize * count;

-    if (size == 1) {
+    if (1 == size) {
         if (MPI_IN_PLACE != sbuf) {
             memcpy((char *) rbuf, sbuf, total_dsize);
         }
@@ -483,7 +482,7 @@ int mca_coll_acoll_allreduce_intra(const void *sbuf, void *rbuf, size_t count,

     alg = coll_allreduce_decision_fixed(size, total_dsize);

-    if (num_nodes == 1) {
+    if (1 == num_nodes) {
         if (total_dsize < 32) {
             return ompi_coll_base_allreduce_intra_recursivedoubling(sbuf, rbuf, count, dtype, op,
                                                                     comm, module);
@@ -494,10 +493,10 @@ int mca_coll_acoll_allreduce_intra(const void *sbuf, void *rbuf, size_t count,
             return ompi_coll_base_allreduce_intra_recursivedoubling(sbuf, rbuf, count, dtype, op,
                                                                     comm, module);
         } else if (total_dsize < 65536) {
-            if (alg == 1) {
+            if (1 == alg) {
                 return ompi_coll_base_allreduce_intra_recursivedoubling(sbuf, rbuf, count, dtype,
                                                                         op, comm, module);
-            } else if (alg == 2) {
+            } else if (2 == alg) {
                 return ompi_coll_base_allreduce_intra_redscat_allgather(sbuf, rbuf, count, dtype,
                                                                         op, comm, module);
             } else { /* alg == 3 */