#include <catch2/catch.hpp>

#if openPMD_HAVE_MPI
-# include <mpi.h>
-
-# include <iostream>
-# include <algorithm>
-# include <string>
-# include <vector>
-# include <list>
-# include <memory>
-# include <tuple>
+# include <algorithm>
+# include <iostream>
+# include <list>
+# include <memory>
+# include <mpi.h>
+# include <numeric> // std::iota
+# include <string>
+# include <tuple>
+# include <vector>

using namespace openPMD;

@@ -337,6 +337,61 @@ TEST_CASE( "available_chunks_test", "[parallel][adios]" )
    available_chunks_test( "bp" );
}

+void
+extendDataset( std::string const & ext )
+{
+    std::string filename = "../samples/parallelExtendDataset." + ext;
+    int r_mpi_rank{ -1 }, r_mpi_size{ -1 };
+    MPI_Comm_rank( MPI_COMM_WORLD, &r_mpi_rank );
+    MPI_Comm_size( MPI_COMM_WORLD, &r_mpi_size );
+    unsigned mpi_rank{ static_cast< unsigned >( r_mpi_rank ) },
+        mpi_size{ static_cast< unsigned >( r_mpi_size ) };
+    std::vector< int > data1( 25 );
+    std::vector< int > data2( 25 );
+    std::iota( data1.begin(), data1.end(), 0 );
+    std::iota( data2.begin(), data2.end(), 25 );
+    {
+        Series write( filename, Access::CREATE, MPI_COMM_WORLD );
+        if( ext == "bp" && write.backend() != "ADIOS2" )
+        {
+            // dataset resizing unsupported in ADIOS1
+            return;
+        }
+        Dataset ds1{ Datatype::INT, { mpi_size, 25 } };
+        Dataset ds2{ { mpi_size, 50 } };
+
+        // array record component -> array record component
+        // should work
+        auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ];
+        E_x.resetDataset( ds1 );
+        E_x.storeChunk( data1, { mpi_rank, 0 }, { 1, 25 } );
+        write.flush();
+
+        E_x.resetDataset( ds2 );
+        E_x.storeChunk( data2, { mpi_rank, 25 }, { 1, 25 } );
+        write.flush();
+    }
+
+    {
+        Series read( filename, Access::READ_ONLY );
+        auto E_x = read.iterations[ 0 ].meshes[ "E" ][ "x" ];
+        REQUIRE( E_x.getExtent() == Extent{ mpi_size, 50 } );
+        auto chunk = E_x.loadChunk< int >( { 0, 0 }, { mpi_size, 50 } );
+        read.flush();
+        for( size_t rank = 0; rank < mpi_size; ++rank )
+        {
+            for( size_t i = 0; i < 50; ++i )
+            {
+                REQUIRE( chunk.get()[ rank * 50 + i ] == int( i ) );
+            }
+        }
+    }
+}
+
+TEST_CASE( "extend_dataset", "[parallel]" )
+{
+    extendDataset( "bp" );
+}
#endif

#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI
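Note on the pattern being exercised: growing a dataset boils down to a second resetDataset() call with a larger Extent, after which the newly added region can be written like any other chunk. Below is a minimal serial sketch of the same workflow, using only calls that already appear in this diff; the file name and sizes are illustrative and not part of the change.

    #include <openPMD/openPMD.hpp>

    #include <numeric> // std::iota
    #include <vector>

    int main()
    {
        using namespace openPMD;

        // Illustrative output file; resizing needs a backend that supports it (ADIOS2 here).
        Series series( "extendExample.bp", Access::CREATE );
        auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ];

        // Create a 1 x 25 dataset and fill it with 0..24.
        std::vector< int > first( 25 );
        std::iota( first.begin(), first.end(), 0 );
        E_x.resetDataset( Dataset{ Datatype::INT, { 1, 25 } } );
        E_x.storeChunk( first, { 0, 0 }, { 1, 25 } );
        series.flush();

        // Grow the same dataset to 1 x 50 and fill the new half with 25..49.
        std::vector< int > second( 25 );
        std::iota( second.begin(), second.end(), 25 );
        E_x.resetDataset( Dataset{ { 1, 50 } } );
        E_x.storeChunk( second, { 0, 25 }, { 1, 25 } );
        series.flush();

        return 0;
    }

The parallel test above does the same per MPI rank along the first axis, then re-reads the file and checks that all 50 columns of every row arrived.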