From 70b3eb81b6c9e0e2639c35468827198807dc266b Mon Sep 17 00:00:00 2001
From: Wei-keng Liao
Date: Mon, 4 Aug 2025 01:23:15 -0500
Subject: [PATCH 1/9] support MPI-IO large count APIs
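
The "_c" suffixed variants were introduced in MPI 4.0: they take the
element count as an MPI_Count instead of an int, so one call can move
more than INT_MAX elements. A minimal sketch of the difference, for
illustration only (demo, fh, buf, and n are hypothetical names, not
part of this patch):

    #include <mpi.h>

    /* illustrative sketch: write n bytes once per API flavor */
    static int demo(MPI_File fh, const char *buf, MPI_Count n)
    {
        MPI_Status st;
        /* classic API: int count, capped at INT_MAX elements */
        int rc = MPI_File_write(fh, buf, (int)n, MPI_BYTE, &st);
        if (rc == MPI_SUCCESS)
            /* large-count API: MPI_Count is typically 64-bit */
            rc = MPI_File_write_c(fh, buf, n, MPI_BYTE, &st);
        return rc;
    }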
---
 darshan-runtime/configure.ac                |  44 +-
 darshan-runtime/lib/darshan-dynamic.h       |  33 +
 darshan-runtime/lib/darshan-mpiio.c         | 682 ++++++++++++++++++
 .../share/ld-opts/darshan-mpiio-ld-opts     |  56 ++
 4 files changed, 814 insertions(+), 1 deletion(-)

diff --git a/darshan-runtime/configure.ac b/darshan-runtime/configure.ac
index 54b22c0b4..dba6f4dc6 100644
--- a/darshan-runtime/configure.ac
+++ b/darshan-runtime/configure.ac
@@ -770,6 +770,46 @@ if test "x$enable_darshan_runtime" = xyes ; then
                  [AC_MSG_RESULT(yes)],
                  [AC_MSG_RESULT(no)])],
         [AC_MSG_RESULT(no)]
     )
+
+    dnl MPI_Count was first introduced in MPI 3.0. Check the MPI-IO functions
+    dnl that make use of MPI_Count.
+    AC_CHECK_FUNCS([MPI_File_read_c \
+                    MPI_File_read_all_c \
+                    MPI_File_read_all_begin_c \
+                    MPI_File_read_at_c \
+                    MPI_File_read_at_all_c \
+                    MPI_File_read_at_all_begin_c \
+                    MPI_File_read_ordered_c \
+                    MPI_File_read_ordered_begin_c \
+                    MPI_File_read_shared_c \
+                    MPI_File_write_c \
+                    MPI_File_write_all_c \
+                    MPI_File_write_all_begin_c \
+                    MPI_File_write_at_c \
+                    MPI_File_write_at_all_c \
+                    MPI_File_write_at_all_begin_c \
+                    MPI_File_write_ordered_c \
+                    MPI_File_write_ordered_begin_c \
+                    MPI_File_write_shared_c \
+                    MPI_File_iread_c \
+                    MPI_File_iread_all_c \
+                    MPI_File_iread_at_c \
+                    MPI_File_iread_at_all_c \
+                    MPI_File_iread_shared_c \
+                    MPI_File_iwrite_c \
+                    MPI_File_iwrite_all_c \
+                    MPI_File_iwrite_at_c \
+                    MPI_File_iwrite_at_all_c \
+                    MPI_File_iwrite_shared_c],
+                   [have_mpi_io_large_count_apis=yes],
+                   [have_mpi_io_large_count_apis=no])
+
+    if test "x$have_mpi_io_large_count_apis" = "xyes" ; then
+       AC_DEFINE(HAVE_MPI_LARGE_COUNT, 1,
+                 [Define if MPI-IO supports the large-count feature])
+    fi
+  else
+    have_mpi_io_large_count_apis=no
 fi

 #
@@ -833,6 +873,7 @@ else
   enable_ldms_mod=no
   with_log_path=
   with_jobid_env=
+  have_mpi_io_large_count_apis=no
 fi

 AC_SUBST(ENABLE_LD_PRELOAD, ["$enable_ld_preload"])
@@ -972,5 +1013,6 @@ if test "x$enable_darshan_runtime" = xyes ; then
            Log file env variables - $__log_path_by_env
            Location of Darshan log files - $__log_path
            Job ID env variable - $with_jobid_env
-           MPI-IO hints - $__DARSHAN_LOG_HINTS"
+           MPI-IO hints - $__DARSHAN_LOG_HINTS
+           MPI-IO large-count support - $have_mpi_io_large_count_apis"
 fi
diff --git a/darshan-runtime/lib/darshan-dynamic.h b/darshan-runtime/lib/darshan-dynamic.h
index db370324c..12397d182 100644
--- a/darshan-runtime/lib/darshan-dynamic.h
+++ b/darshan-runtime/lib/darshan-dynamic.h
@@ -148,6 +148,39 @@ DARSHAN_EXTERN_DECL(PMPI_Gather, int, (const void *sendbuf, int sendcount, MPI_D
 DARSHAN_EXTERN_DECL(PMPI_Gather, int, (void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
 #endif
 DARSHAN_EXTERN_DECL(PMPI_Barrier, int, (MPI_Comm comm));
+
+/* If MPI-IO supports the large-count feature */
+#ifdef HAVE_MPI_LARGE_COUNT
+DARSHAN_EXTERN_DECL(PMPI_File_iread_all_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iread_at_all_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iread_at_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iread_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iread_shared_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iwrite_all_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iwrite_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iwrite_at_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iwrite_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_iwrite_shared_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_EXTERN_DECL(PMPI_File_read_all_begin_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_read_all_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_read_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_read_at_all_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_read_at_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_read_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_read_ordered_begin_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_read_ordered_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_read_shared_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_all_begin_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_write_all_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_write_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_at_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_ordered_begin_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype));
+DARSHAN_EXTERN_DECL(PMPI_File_write_ordered_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_EXTERN_DECL(PMPI_File_write_shared_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status));
+#endif + #endif /* HAVE_MPI */ #endif diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c index de8bc1414..15f12d9bc 100644 --- a/darshan-runtime/lib/darshan-mpiio.c +++ b/darshan-runtime/lib/darshan-mpiio.c @@ -1899,6 +1899,688 @@ static void mpiio_cleanup() return; } +#ifdef HAVE_MPI_LARGE_COUNT +/* MPI-IO large-count APIs */ + +DARSHAN_FORWARD_DECL(PMPI_File_iread_all_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iread_at_all_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iread_at_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iread_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iread_shared_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iwrite_all_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iwrite_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request)); +DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_read_all_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_read_at_c, int, (MPI_File fh, MPI_Offset offset, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_read_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_read_shared_c, int, (MPI_File fh, void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_write_all_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count 
count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_at_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype)); +DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); +DARSHAN_FORWARD_DECL(PMPI_File_write_shared_c, int, (MPI_File fh, const void *buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status)); + + +int DARSHAN_DECL(MPI_File_iread_all_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iread_all_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iread_all_c(fh, buf, count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_NB_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iread_all_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iread_all_c) + +int DARSHAN_DECL(MPI_File_iread_at_all_c)(MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_iread_at_all_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iread_at_all_c(fh, offset, buf, count, + datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_NB_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iread_at_all_c, int, (MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request), + MPI_File_iread_at_all_c) + +int DARSHAN_DECL(MPI_File_iread_at_c)(MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_iread_at_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iread_at_c(fh, offset, buf, count, + datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_NB_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iread_at_c, int, (MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request), + MPI_File_iread_at_c) + +int DARSHAN_DECL(MPI_File_iread_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iread_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iread_c(fh, buf, count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_NB_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} 
+DARSHAN_WRAPPER_MAP(PMPI_File_iread_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iread_c) + +int DARSHAN_DECL(MPI_File_iread_shared_c)(MPI_File fh, void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iread_shared_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iread_shared_c(fh, buf, count, + datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_NB_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iread_shared_c, int, (MPI_File fh, void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iread_shared_c) + +int DARSHAN_DECL(MPI_File_iwrite_all_c)(MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iwrite_all_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iwrite_all_c(fh, buf, count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_all_c, int, (MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iwrite_all_c) + +int DARSHAN_DECL(MPI_File_iwrite_at_all_c)(MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_iwrite_at_all_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iwrite_at_all_c(fh, offset, buf, + count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request), + MPI_File_iwrite_at_all_c) + +int DARSHAN_DECL(MPI_File_iwrite_at_c)(MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_iwrite_at_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iwrite_at_c(fh, offset, buf, + count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at_c, int, (MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, __D_MPI_REQUEST *request), + MPI_File_iwrite_at_c) + +int DARSHAN_DECL(MPI_File_iwrite_c)(MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iwrite_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iwrite_c(fh, buf, count, datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2); + 
MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_c, int, (MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iwrite_c) + +int DARSHAN_DECL(MPI_File_iwrite_shared_c)(MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_iwrite_shared_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_iwrite_shared_c(fh, buf, count, + datatype, request); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared_c, int, (MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, __D_MPI_REQUEST * request), + MPI_File_iwrite_shared_c) + +int DARSHAN_DECL(MPI_File_read_all_begin_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_all_begin_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_all_begin_c(fh, buf, count, datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_SPLIT_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype), + MPI_File_read_all_begin_c) + +int DARSHAN_DECL(MPI_File_read_all_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_all_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_all_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_COLL_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_all_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status), + MPI_File_read_all_c) + +int DARSHAN_DECL(MPI_File_read_at_all_begin_c)(MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_read_at_all_begin_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_at_all_begin_c(fh, offset, buf, + count, datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_SPLIT_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype), MPI_File_read_at_all_begin_c) + +int DARSHAN_DECL(MPI_File_read_at_all_c)(MPI_File fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status * status) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_read_at_all_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_at_all_c(fh, offset, buf, + count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_COLL_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_c, int, (MPI_File 
fh, MPI_Offset offset, void * buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status * status), + MPI_File_read_at_all_c) + +int DARSHAN_DECL(MPI_File_read_at_c)(MPI_File fh, MPI_Offset offset, void *buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_read_at_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_at_c(fh, offset, buf, + count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_INDEP_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_at_c, int, (MPI_File fh, MPI_Offset offset, void *buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at_c) + +int DARSHAN_DECL(MPI_File_read_c)(MPI_File fh, void *buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_c(fh, buf, count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_INDEP_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_c, int, (MPI_File fh, void *buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status *status), MPI_File_read_c) + +int DARSHAN_DECL(MPI_File_read_ordered_begin_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_ordered_begin_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_ordered_begin_c(fh, buf, count, + datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_SPLIT_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_begin_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype), + MPI_File_read_ordered_begin_c) + +int DARSHAN_DECL(MPI_File_read_ordered_c)(MPI_File fh, void * buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status * status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_ordered_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_ordered_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_COLL_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_c, int, (MPI_File fh, void * buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status * status), + MPI_File_read_ordered_c) + +int DARSHAN_DECL(MPI_File_read_shared_c)(MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_read_shared_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_read_shared_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_READ(ret, fh, count, datatype, offset, MPIIO_INDEP_READS, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_read_shared_c, int, (MPI_File fh, void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status 
*status), + MPI_File_read_shared_c) + +int DARSHAN_DECL(MPI_File_write_all_begin_c)(MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_all_begin_c); + + MPI_File_get_position(fh, &offset); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_all_begin_c(fh, buf, count, datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_SPLIT_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin_c, int, (MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype), + MPI_File_write_all_begin_c) + +int DARSHAN_DECL(MPI_File_write_all_c)(MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_all_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_all_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_COLL_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_all_c, int, (MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status), + MPI_File_write_all_c) + +int DARSHAN_DECL(MPI_File_write_at_all_begin_c)(MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_write_at_all_begin_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_at_all_begin_c(fh, offset, + buf, count, datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_SPLIT_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin_c, int, (MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype), MPI_File_write_at_all_begin_c) + +int DARSHAN_DECL(MPI_File_write_at_all_c)(MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status * status) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_write_at_all_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_at_all_c(fh, offset, buf, + count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_COLL_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_c, int, (MPI_File fh, MPI_Offset offset, const void * buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status * status), + MPI_File_write_at_all_c) + +int DARSHAN_DECL(MPI_File_write_at_c)(MPI_File fh, MPI_Offset offset, const void *buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + + MAP_OR_FAIL(PMPI_File_write_at_c); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_at_c(fh, offset, buf, + count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_INDEP_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_at_c, int, (MPI_File fh, MPI_Offset offset, const void *buf, + MPI_Count count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at_c) + +int 
DARSHAN_DECL(MPI_File_write_c)(MPI_File fh, const void *buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_c); + + MPI_File_get_position(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_c(fh, buf, count, datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_INDEP_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_c, int, (MPI_File fh, const void *buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status *status), MPI_File_write_c) + +int DARSHAN_DECL(MPI_File_write_ordered_begin_c)(MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_ordered_begin_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_ordered_begin_c(fh, buf, count, + datatype); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_SPLIT_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin_c, int, (MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype), + MPI_File_write_ordered_begin_c) + +int DARSHAN_DECL(MPI_File_write_ordered_c)(MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status * status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_ordered_c); + MPI_File_get_position_shared(fh, &offset); + + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_ordered_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_COLL_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_c, int, (MPI_File fh, const void * buf, MPI_Count count, + MPI_Datatype datatype, MPI_Status * status), + MPI_File_write_ordered_c) + +int DARSHAN_DECL(MPI_File_write_shared_c)(MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status) +{ + int ret; + double tm1, tm2; + MPI_Offset offset; + + MAP_OR_FAIL(PMPI_File_write_shared_c); + + MPI_File_get_position_shared(fh, &offset); + tm1 = MPIIO_WTIME(); + ret = __real_PMPI_File_write_shared_c(fh, buf, count, + datatype, status); + tm2 = MPIIO_WTIME(); + + MPIIO_PRE_RECORD(); + MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_INDEP_WRITES, tm1, tm2); + MPIIO_POST_RECORD(); + + return(ret); +} +DARSHAN_WRAPPER_MAP(PMPI_File_write_shared_c, int, (MPI_File fh, const void * buf, MPI_Count count, MPI_Datatype datatype, MPI_Status *status), + MPI_File_write_shared_c) + +#endif + /* * Local variables: * c-indent-level: 4 diff --git a/darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts b/darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts index 84828335d..97ac77db9 100644 --- a/darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts +++ b/darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts @@ -62,3 +62,59 @@ --wrap=PMPI_File_write_ordered --wrap=PMPI_File_write_ordered_begin --wrap=PMPI_File_write_shared +--wrap=MPI_File_iread_c +--wrap=MPI_File_iread_all_c +--wrap=MPI_File_iread_at_c +--wrap=MPI_File_iread_at_all_c +--wrap=MPI_File_iread_shared_c +--wrap=MPI_File_read_c +--wrap=MPI_File_read_all_c +--wrap=MPI_File_read_all_begin_c 
+--wrap=MPI_File_read_at_c
+--wrap=MPI_File_read_at_all_c
+--wrap=MPI_File_read_at_all_begin_c
+--wrap=MPI_File_read_ordered_c
+--wrap=MPI_File_read_ordered_begin_c
+--wrap=MPI_File_read_shared_c
+--wrap=MPI_File_iwrite_c
+--wrap=MPI_File_iwrite_all_c
+--wrap=MPI_File_iwrite_at_c
+--wrap=MPI_File_iwrite_at_all_c
+--wrap=MPI_File_iwrite_shared_c
+--wrap=MPI_File_write_c
+--wrap=MPI_File_write_all_c
+--wrap=MPI_File_write_all_begin_c
+--wrap=MPI_File_write_at_c
+--wrap=MPI_File_write_at_all_c
+--wrap=MPI_File_write_at_all_begin_c
+--wrap=MPI_File_write_ordered_c
+--wrap=MPI_File_write_ordered_begin_c
+--wrap=MPI_File_write_shared_c
+--wrap=PMPI_File_iread_c
+--wrap=PMPI_File_iread_all_c
+--wrap=PMPI_File_iread_at_c
+--wrap=PMPI_File_iread_at_all_c
+--wrap=PMPI_File_iread_shared_c
+--wrap=PMPI_File_read_c
+--wrap=PMPI_File_read_all_c
+--wrap=PMPI_File_read_all_begin_c
+--wrap=PMPI_File_read_at_c
+--wrap=PMPI_File_read_at_all_c
+--wrap=PMPI_File_read_at_all_begin_c
+--wrap=PMPI_File_read_ordered_c
+--wrap=PMPI_File_read_ordered_begin_c
+--wrap=PMPI_File_read_shared_c
+--wrap=PMPI_File_iwrite_c
+--wrap=PMPI_File_iwrite_all_c
+--wrap=PMPI_File_iwrite_at_c
+--wrap=PMPI_File_iwrite_at_all_c
+--wrap=PMPI_File_iwrite_shared_c
+--wrap=PMPI_File_write_c
+--wrap=PMPI_File_write_all_c
+--wrap=PMPI_File_write_all_begin_c
+--wrap=PMPI_File_write_at_c
+--wrap=PMPI_File_write_at_all_c
+--wrap=PMPI_File_write_at_all_begin_c
+--wrap=PMPI_File_write_ordered_c
+--wrap=PMPI_File_write_ordered_begin_c
+--wrap=PMPI_File_write_shared_c

From 798e31dab0c057fc1d30ff4a96b198453363773a Mon Sep 17 00:00:00 2001
From: wkliao
Date: Mon, 4 Aug 2025 18:51:19 -0500
Subject: [PATCH 2/9] add missing PMPI_File_iwrite_all
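
MPI 3.1 added the nonblocking collective MPI_File_iwrite_all, but its
Darshan wrapper was missing, so such writes went unrecorded. A sketch
of the kind of caller this wrapper now intercepts and counts under
MPIIO_NB_WRITES (illustrative only; demo, fh, and buf are hypothetical
names):

    #include <mpi.h>

    static int demo(MPI_File fh, const int *buf)
    {
        MPI_Request req;
        MPI_Status  st;
        /* nonblocking collective write; the wrapper records it when
         * the request is issued, not when it completes */
        int rc = MPI_File_iwrite_all(fh, buf, 4, MPI_INT, &req);
        if (rc == MPI_SUCCESS)
            rc = MPI_Wait(&req, &st);
        return rc;
    }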
---
 darshan-runtime/lib/darshan-mpiio.c | 37 +++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c
index 15f12d9bc..5e5ce73fd 100644
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -46,8 +46,10 @@ DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at_all, int, (MPI_File fh, MPI_Offset offs
 #endif
 #ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
 #ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
@@ -987,6 +989,41 @@ DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, void * buf, int count,
 	MPI_File_iwrite)
 #endif

+#ifdef HAVE_MPI_CONST
+int DARSHAN_DECL(MPI_File_iwrite_all)(MPI_File fh, const void * buf, int count,
+    MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#else
+int DARSHAN_DECL(MPI_File_iwrite_all)(MPI_File fh, void * buf, int count,
+    MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#endif
+{
+    int ret;
+    double tm1, tm2;
+    MPI_Offset offset;
+
+    MAP_OR_FAIL(PMPI_File_iwrite_all);
+
+    MPI_File_get_position(fh, &offset);
+    tm1 = MPIIO_WTIME();
+    ret = __real_PMPI_File_iwrite_all(fh, buf, count, datatype, request);
+    tm2 = MPIIO_WTIME();
+
+    MPIIO_PRE_RECORD();
+    MPIIO_RECORD_WRITE(ret, fh, count, datatype, offset, MPIIO_NB_WRITES, tm1, tm2);
+    MPIIO_POST_RECORD();
+
+    return(ret);
+}
+#ifdef HAVE_MPI_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_all, int, (MPI_File fh, const void * buf, int count,
+    MPI_Datatype datatype, __D_MPI_REQUEST * request),
+    MPI_File_iwrite_all)
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_all, int, (MPI_File fh, void * buf, int count,
+    MPI_Datatype datatype, __D_MPI_REQUEST * request),
+    MPI_File_iwrite_all)
+#endif
+
 int DARSHAN_DECL(MPI_File_iread_at)(MPI_File fh, MPI_Offset offset, void * buf,
     int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
 {

From 24cecf2b23895f377868d9968261b84946d3a3d6 Mon Sep 17 00:00:00 2001
From: wkliao
Date: Mon, 4 Aug 2025 18:59:33 -0500
Subject: [PATCH 3/9] fix MPI_File_iread_shared_c to call
 MPI_File_get_position_shared
---
 darshan-runtime/lib/darshan-mpiio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c
index 5e5ce73fd..18f4fd807 100644
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -2068,7 +2068,7 @@ int DARSHAN_DECL(MPI_File_iread_shared_c)(MPI_File fh, void * buf, MPI_Count cou

     MAP_OR_FAIL(PMPI_File_iread_shared_c);

-    MPI_File_get_position(fh, &offset);
+    MPI_File_get_position_shared(fh, &offset);
     tm1 = MPIIO_WTIME();
     ret = __real_PMPI_File_iread_shared_c(fh, buf, count,
         datatype, request);

From 7346629291b96bb946eabc34b8a713c0f23642b5 Mon Sep 17 00:00:00 2001
From: wkliao
Date: Tue, 5 Aug 2025 14:43:50 -0500
Subject: [PATCH 4/9] Add an exhaustive test for all 56 MPI file write and
 read APIs

* when the large-count feature is not available, test only the 28
  non-large-count MPI-IO APIs
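
Each selected API is run in write mode and, with -i, in read mode, and
the byte count reported by darshan-parser is compared against the size
of the test file. As a sketch of the shape of one tested combination
(-x -c -l: explicit offset, collective, large count; illustrative only,
demo and its arguments are hypothetical names):

    #include <mpi.h>

    static int demo(MPI_File fh, const char *wbuf, char *rbuf,
                    MPI_Offset off, MPI_Count n)
    {
        MPI_Status st;
        /* explicit-offset collective write, then read back (sketch) */
        int rc = MPI_File_write_at_all_c(fh, off, wbuf, n, MPI_BYTE, &st);
        if (rc == MPI_SUCCESS)
            rc = MPI_File_read_at_all_c(fh, off, rbuf, n, MPI_BYTE, &st);
        return rc;
    }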
---
 darshan-runtime/Makefile.am       |   2 +-
 darshan-runtime/configure.ac      |  10 +
 darshan-runtime/test/Makefile.am  |  25 ++
 darshan-runtime/test/tst_mpi_io.c | 537 ++++++++++++++++++++++++++++++
 darshan-runtime/test/tst_runs.sh  | 277 +++++++++++++++
 5 files changed, 850 insertions(+), 1 deletion(-)
 create mode 100644 darshan-runtime/test/Makefile.am
 create mode 100644 darshan-runtime/test/tst_mpi_io.c
 create mode 100755 darshan-runtime/test/tst_runs.sh

diff --git a/darshan-runtime/Makefile.am b/darshan-runtime/Makefile.am
index 4a779e9a7..cdc4527b3 100644
--- a/darshan-runtime/Makefile.am
+++ b/darshan-runtime/Makefile.am
@@ -5,7 +5,7 @@

 ACLOCAL_AMFLAGS = -I ../maint/config

-SUBDIRS = lib pkgconfig share
+SUBDIRS = lib pkgconfig share test

 bin_SCRIPTS = darshan-config \
               darshan-gen-cc.pl \
diff --git a/darshan-runtime/configure.ac b/darshan-runtime/configure.ac
index dba6f4dc6..5428bf208 100644
--- a/darshan-runtime/configure.ac
+++ b/darshan-runtime/configure.ac
@@ -807,7 +807,16 @@ if test "x$enable_darshan_runtime" = xyes ; then
     if test "x$have_mpi_io_large_count_apis" = "xyes" ; then
        AC_DEFINE(HAVE_MPI_LARGE_COUNT, 1,
                  [Define if MPI-IO supports the large-count feature])
+       AC_SUBST(HAVE_MPI_LARGE_COUNT, [1])
     fi
+
+    ac_mpi_path=`AS_DIRNAME(["$CC"])`
+    if test "x$ac_mpi_path" = "x." ; then
+      AC_PATH_PROGS(mpicc_path, ${CC})
+      ac_mpi_path=`AS_DIRNAME(["$mpicc_path"])`
+    fi
+    AC_PATH_PROGS(TESTMPIRUN, mpiexec mpirun, [], [$ac_mpi_path])
+    AC_SUBST(TESTMPIRUN)
   else
     have_mpi_io_large_count_apis=no
 fi
@@ -936,6 +945,7 @@ AC_CONFIG_FILES(Makefile \
 	darshan-gen-cxx.pl \
 	darshan-gen-fortran.pl \
 	lib/Makefile \
+	test/Makefile \
 	pkgconfig/Makefile \
 	pkgconfig/darshan-runtime.pc \
 	share/Makefile \
diff --git a/darshan-runtime/test/Makefile.am b/darshan-runtime/test/Makefile.am
new file mode 100644
index 000000000..2147e9a83
--- /dev/null
+++ b/darshan-runtime/test/Makefile.am
@@ -0,0 +1,25 @@
+#
+# See COPYRIGHT notice in top-level directory.
+#
+# @configure_input@
+
+AM_CPPFLAGS = -I$(top_builddir)
+
+check_PROGRAMS =
+
+#if BUILD_MPIIO_MODULE
+check_PROGRAMS += tst_mpi_io
+tst_mpi_io_SOURCES = tst_mpi_io.c
+#endif
+
+AM_TESTS_ENVIRONMENT = export TESTMPIRUN="$(TESTMPIRUN)";
+AM_TESTS_ENVIRONMENT += export TST_DARSHAN_LOG_PATH="$(__DARSHAN_LOG_PATH)";
+AM_TESTS_ENVIRONMENT += export USERNAME_ENV="$(USERNAME_ENV)";
+AM_TESTS_ENVIRONMENT += export HAVE_MPI_LARGE_COUNT="$(HAVE_MPI_LARGE_COUNT)";
+AM_TESTS_ENVIRONMENT += export check_PROGRAMS="$(check_PROGRAMS)";
+
+TESTS = tst_runs.sh
+TEST_EXTENSIONS = .sh
+
+EXTRA_DIST = tst_runs.sh
+
diff --git a/darshan-runtime/test/tst_mpi_io.c b/darshan-runtime/test/tst_mpi_io.c
new file mode 100644
index 000000000..ef6ef1011
--- /dev/null
+++ b/darshan-runtime/test/tst_mpi_io.c
@@ -0,0 +1,537 @@
+#ifdef HAVE_CONFIG_H
+#include <config.h> /* output of 'configure' */
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h> /* unlink() */
+#include <mpi.h>
+
+#define CHECK_ERROR(fnc) { \
+    if (rank == 0 && verbose && strcmp(fnc, "MPI_File_open") \
+                             && strcmp(fnc, "MPI_File_close") \
+                             && strcmp(fnc, "MPI_File_seek") \
+                             && strcmp(fnc, "MPI_Waitall") \
+                             && strcmp(fnc, "MPI_File_write_all_end") \
+                             && strcmp(fnc, "MPI_File_write_at_all_end") \
+                             && strcmp(fnc, "MPI_File_write_ordered_end") \
+                             && strcmp(fnc, "MPI_File_read_all_end") \
+                             && strcmp(fnc, "MPI_File_read_at_all_end") \
+                             && strcmp(fnc, "MPI_File_read_ordered_end")) \
+        printf("---- testing %s\n",fnc); \
+    if (err != MPI_SUCCESS) { \
+        int errorStringLen; \
+        char errorString[MPI_MAX_ERROR_STRING]; \
+        MPI_Error_string(err, errorString, &errorStringLen); \
+        printf("Error at line %d when calling %s: %s\n",__LINE__,fnc,errorString); \
+    } \
+}
+
+static void
+usage(char *argv0)
+{
+    char *help =
+    "Usage: %s [OPTIONS]\n"
+    "       [-h] print this help\n"
+    "       [-q] quiet mode\n"
+    "       [-i] test read API\n"
+    "       [-c] test collective API\n"
+    "       [-a] test asynchronous API\n"
+    "       [-s] test shared API\n"
+    "       [-p] test split API\n"
+    "       [-o] test ordered API\n"
+    "       [-x] test explicit offset API\n"
+    "       [-l] test large-count API\n";
+    fprintf(stderr, help, argv0);
+}
+
+#define NELEMS 8
+
+/*----< main() >------------------------------------------------------------*/
+int main(int argc, char **argv)
+{
+    extern int optind;
+    extern char *optarg;
+    char filename[512], buf[NELEMS];
+    int i, err, rank, np, verbose, omode;
+    int test_read, test_collective, test_async, test_shared;
+    int test_split, test_order, test_at, test_large_count;
+    MPI_Offset offset;
+    MPI_Count nbytes;
+    MPI_File fh;
+    MPI_Request req;
+    MPI_Status status;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &np);
+
+    verbose = 1;
+    test_read = 0;
+    test_collective = 0;
+    test_async = 0;
+    test_shared = 0;
+    test_split = 0;
+    test_order = 0;
+    test_at = 0;
+    test_large_count = 0;
+
+    while ((i = getopt(argc, argv, "hqdicaspoxl")) != EOF)
+        switch(i) {
+            case 'q': verbose = 0;
+                      break;
+            case 'i': test_read = 1;
+                      break;
+            case 'c': test_collective = 1;
+                      break;
+            case 'a': test_async = 1;
+                      break;
+            case 's': test_shared = 1;
+                      break;
+            case 'p': test_split = 1;
+                      break;
+            case 'o': test_order = 1;
+                      break;
+            case 'x': test_at = 1;
+                      break;
+            case 'l': test_large_count = 1;
+                      break;
+            case 'h':
+            default:  if (rank==0) usage(argv[0]);
+                      MPI_Finalize();
+                      return 1;
+        }
+
+#ifndef HAVE_MPI_LARGE_COUNT
+    if (test_large_count == 1) {
+        printf("The underlying MPI-IO does not support the large-count feature... skip\n");
+        MPI_Finalize();
+        return 0;
+    }
+#endif
+
+    if (argv[optind] == NULL) strcpy(filename, "testfile.dat");
+    else                      snprintf(filename, 512, "%s", argv[optind]);
+
+    offset = rank * NELEMS;
+    nbytes = NELEMS;
+    for (i=0; i<NELEMS; i++)

Date: Sat, 9 Aug 2025 16:42:20 -0500
Subject: [PATCH 5/9] add make check to CI
---
 darshan-runtime/test/Makefile.am | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/darshan-runtime/test/Makefile.am b/darshan-runtime/test/Makefile.am
index 2147e9a83..f4a2664ff 100644
--- a/darshan-runtime/test/Makefile.am
+++ b/darshan-runtime/test/Makefile.am
@@ -7,10 +7,10 @@ AM_CPPFLAGS = -I$(top_builddir)

 check_PROGRAMS =

-#if BUILD_MPIIO_MODULE
-check_PROGRAMS += tst_mpi_io
-tst_mpi_io_SOURCES = tst_mpi_io.c
-#endif
+if BUILD_MPIIO_MODULE
+   check_PROGRAMS += tst_mpi_io
+   tst_mpi_io_SOURCES = tst_mpi_io.c
+endif

 AM_TESTS_ENVIRONMENT = export TESTMPIRUN="$(TESTMPIRUN)";
 AM_TESTS_ENVIRONMENT += export TST_DARSHAN_LOG_PATH="$(__DARSHAN_LOG_PATH)";

From 3b952bb971cb7bf70f36251906c12e9604f9c74a Mon Sep 17 00:00:00 2001
From: Wei-keng Liao
Date: Wed, 6 Aug 2025 15:32:49 -0500
Subject: [PATCH 6/9] GitHub Actions regression test: update to use MPICH 4.3.1

Full support for the large-count feature in MPICH starts with version 4.2.2.
---
 .github/workflows/end_to_end_regression.yml | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/end_to_end_regression.yml b/.github/workflows/end_to_end_regression.yml
index b2de5ee89..ef34f9023 100644
--- a/.github/workflows/end_to_end_regression.yml
+++ b/.github/workflows/end_to_end_regression.yml
@@ -8,6 +8,9 @@ on:
     branches:
       - main

+env:
+  MPICH_VERSION: 4.3.1
+
 jobs:
   end_to_end_regression:
     strategy:
@@ -22,16 +25,16 @@ jobs:
           sudo apt-get install -y gfortran bc
       - name: Install MPICH
         run: |
+          echo "Install MPICH ${MPICH_VERSION} in ${PWD}/mpich_install"
           mkdir mpich_install
           export MPICH_INSTALL_PATH=$PWD/mpich_install
-          wget https://www.mpich.org/static/downloads/3.2.1/mpich-3.2.1.tar.gz
-          tar -xzvf mpich-3.2.1.tar.gz
-          cd mpich-3.2.1
+          wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz
+          tar -xzvf mpich-${MPICH_VERSION}.tar.gz
+          cd mpich-${MPICH_VERSION}
           mkdir build
           cd build
           FFLAGS="-w -fallow-argument-mismatch" ../configure --disable-dependency-tracking --prefix=$MPICH_INSTALL_PATH
-          make
-          make install
+          make -j8 install
       - name: Install Darshan
         run: |
           git submodule update --init

From d89957d9182c02eebe02bc25aa10e1f88b436ed6 Mon Sep 17 00:00:00 2001
From: Wei-keng Liao
Date: Sat, 9 Aug 2025 17:05:50 -0500
Subject: [PATCH 7/9] Add more env tests to darshan-runtime/test/tst_runs.sh

* check the file system of the log folder
* allow setting the env variable NP, the number of MPI processes
* run `darshan-config --all` to dump Darshan's configuration
---
 darshan-runtime/test/Makefile.am |  1 +
 darshan-runtime/test/tst_runs.sh | 52 +++++++++++++++++++++++++++-----
 2 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/darshan-runtime/test/Makefile.am b/darshan-runtime/test/Makefile.am
index f4a2664ff..d365f0fe2 100644
--- a/darshan-runtime/test/Makefile.am
+++ b/darshan-runtime/test/Makefile.am
@@ -14,6 +14,7 @@
 AM_TESTS_ENVIRONMENT = export TESTMPIRUN="$(TESTMPIRUN)";
 AM_TESTS_ENVIRONMENT += export TST_DARSHAN_LOG_PATH="$(__DARSHAN_LOG_PATH)";
+AM_TESTS_ENVIRONMENT += export DARSHAN_INSTALL_DIR="$(prefix)";
 AM_TESTS_ENVIRONMENT += export USERNAME_ENV="$(USERNAME_ENV)";
 AM_TESTS_ENVIRONMENT += export HAVE_MPI_LARGE_COUNT="$(HAVE_MPI_LARGE_COUNT)";
 AM_TESTS_ENVIRONMENT += export check_PROGRAMS="$(check_PROGRAMS)";
diff --git a/darshan-runtime/test/tst_runs.sh b/darshan-runtime/test/tst_runs.sh
index f30a1dd0e..7f73b573b 100755
--- a/darshan-runtime/test/tst_runs.sh
+++ b/darshan-runtime/test/tst_runs.sh
@@ -7,21 +7,45 @@
 TODAY_DATE_PATH=`date "+%Y/%-m/%-d"`
 TST_DARSHAN_LOG_PATH="${TST_DARSHAN_LOG_PATH}/${TODAY_DATE_PATH}"
 mkdir -p ${TST_DARSHAN_LOG_PATH}

+# check what file system is used
+echo "df -T ${TST_DARSHAN_LOG_PATH}"
+df -T ${TST_DARSHAN_LOG_PATH}
+
+echo "findmnt -n -o FSTYPE --target ${TST_DARSHAN_LOG_PATH}"
+findmnt -n -o FSTYPE --target ${TST_DARSHAN_LOG_PATH}
+
 if test "x$USERNAME_ENV" = xno ; then
    USERNAME_ENV=$USER
 fi

-DARSGAN_PARSER=../../darshan-util/darshan-parser
+if test -f $DARSHAN_INSTALL_DIR/bin/darshan-parser ; then
+   DARSHAN_PARSER=$DARSHAN_INSTALL_DIR/bin/darshan-parser
+else
+   DARSHAN_PARSER=../../darshan-util/darshan-parser
+fi
+echo "DARSHAN_PARSER=$DARSHAN_PARSER"
+
+if test -f $DARSHAN_INSTALL_DIR/bin/darshan-config ; then
+   DARSHAN_CONFIG=$DARSHAN_INSTALL_DIR/bin/darshan-config
+else
+   DARSHAN_CONFIG=../../darshan-util/darshan-config
+fi
+echo "DARSHAN_CONFIG=$DARSHAN_CONFIG"
+
+$DARSHAN_CONFIG --all

 # run NP number of MPI processes
-NP=4
+# Note when using OpenMPI, setting NP > 2 will fail.
+if test "x$NP" = x ; then
+   NP=2
+fi

 TEST_FILE=./testfile.dat

 # tst_mpi_io.c takes the following command-line options.
 # [-i] test read API
 # [-c] test collective API
-# [-a] test asynchonous API
+# [-a] test asynchronous API
 # [-s] test shared API
 # [-p] test split API
 # [-o] test ordered API
@@ -226,7 +250,12 @@
 done
 echo "OPTS=$OPTS"

-export LD_PRELOAD=../lib/.libs/libdarshan.so
+if test -f $DARSHAN_INSTALL_DIR/lib/libdarshan.so ; then
+   export LD_PRELOAD=$DARSHAN_INSTALL_DIR/lib/libdarshan.so
+else
+   export LD_PRELOAD=../lib/.libs/libdarshan.so
+fi
+echo "LD_PRELOAD=$LD_PRELOAD"

 for exe in ${check_PROGRAMS} ; do

@@ -242,11 +271,16 @@ for exe in ${check_PROGRAMS} ; do
     else
        CMD="${TESTMPIRUN} -n ${NP} ./$exe -$opt $TEST_FILE"
     fi
-    # echo "CMD=$CMD"
+    echo "CMD=$CMD"
     rm -f $TEST_FILE $DARSHAN_LOG_FILE
     $CMD
+
+    echo "ls -l ${DARSHAN_LOG_FILE}"
+    ls -l ${DARSHAN_LOG_FILE}
+
+    echo "parsing ${DARSHAN_LOG_FILE}"
     EXPECT_NBYTE=`stat -c %s $TEST_FILE`
-    nbytes=`$DARSGAN_PARSER ${DARSHAN_LOG_FILE} | grep $DARSGAN_FIELD | cut -f5`
+    nbytes=`$DARSHAN_PARSER ${DARSHAN_LOG_FILE} | grep $DARSGAN_FIELD | cut -f5`
     # echo "EXPECT_NBYTE=$EXPECT_NBYTE nbytes=$nbytes"
     if test "x$nbytes" != "x$EXPECT_NBYTE" ; then
        echo "Error: CMD=$CMD nbytes=$nbytes"
@@ -262,10 +296,12 @@
     else
        CMD="${TESTMPIRUN} -n ${NP} ./$exe -$opt -i $TEST_FILE"
     fi
-    # echo "CMD=$CMD"
+    echo "CMD=$CMD"
     rm -f $DARSHAN_LOG_FILE
     $CMD
+
+    echo "parsing ${DARSHAN_LOG_FILE}"
-    nbytes=`$DARSGAN_PARSER ${DARSHAN_LOG_FILE} | grep $DARSGAN_FIELD | cut -f5`
+    nbytes=`$DARSHAN_PARSER ${DARSHAN_LOG_FILE} | grep $DARSGAN_FIELD | cut -f5`
     # echo "EXPECT_NBYTE=$EXPECT_NBYTE nbytes=$nbytes"
     if test "x$nbytes" != "x$EXPECT_NBYTE" ; then
        echo "Error: CMD=$CMD nbytes=$nbytes"

From 9be9f3ec5a0afee6d9373e34e58fa4b971d9efcb Mon Sep 17 00:00:00 2001
From: wkliao
Date: Mon, 18 Aug 2025 22:16:49 -0500
Subject: [PATCH 8/9] Add --oversubscribe to mpiexec command line if OpenMPI
 is used
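
For reference, the configure probe below keys off the OPEN_MPI macro
that OpenMPI's mpi.h defines; a standalone sketch of the same idea
(illustrative only, not the exact probe source):

    #include <mpi.h>

    /* if OPEN_MPI is defined, the #error makes compilation fail, which
     * tells configure that CC wraps OpenMPI (is_ompi=1) */
    #ifdef OPEN_MPI
    #error OPENMPI FOUND
    #endif

    int main(void) { return 0; }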
---
 darshan-runtime/configure.ac     | 9 +++++++--
 darshan-runtime/test/Makefile.am | 1 +
 darshan-runtime/test/tst_runs.sh | 7 +++++--
 3 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/darshan-runtime/configure.ac b/darshan-runtime/configure.ac
index 5428bf208..b36444c8a 100644
--- a/darshan-runtime/configure.ac
+++ b/darshan-runtime/configure.ac
@@ -690,8 +690,11 @@ if test "x$enable_darshan_runtime" = xyes ; then
 #error OPENMPI FOUND
 #endif
         ])],
-        [],
-        [AC_DEFINE(HAVE_OPEN_MPI, 1, Define if OpenMPI is being used)])
+        [is_ompi=0], [is_ompi=1])
+
+    if test "x$is_ompi" = x1 ; then
+       AC_DEFINE(HAVE_OPEN_MPI, 1, [Define if OpenMPI is being used])
+    fi

     # determine if the MPI library includes MPI-IO functions or not
     AC_MSG_CHECKING(for MPI-IO support in MPI)
@@ -883,6 +886,7 @@ else
   with_log_path=
   with_jobid_env=
   have_mpi_io_large_count_apis=no
+  is_ompi=0
 fi

 AC_SUBST(ENABLE_LD_PRELOAD, ["$enable_ld_preload"])
@@ -918,6 +922,7 @@ AC_SUBST(HDF5_PATH, ["$with_hdf5"])
 AC_SUBST(PNETCDF_PATH, ["$with_pnetcdf"])
 AC_SUBST(DAOS_PATH, ["$with_daos"])
 AC_SUBST(LDMS_PATH, ["$LDMS_HOME"])
+AC_SUBST(HAVE_OPEN_MPI, ["$is_ompi"])

 AM_CONDITIONAL(ENABLE_MMAP_LOGS, [test "x$enable_mmap_logs" = xyes])
 AM_CONDITIONAL(ENABLE_LDPRELOAD, [test "x$enable_ld_preload" = xyes])
diff --git a/darshan-runtime/test/Makefile.am b/darshan-runtime/test/Makefile.am
index d365f0fe2..4e15d25e0 100644
--- a/darshan-runtime/test/Makefile.am
+++ b/darshan-runtime/test/Makefile.am
@@ -17,6 +17,7 @@
 AM_TESTS_ENVIRONMENT += export TST_DARSHAN_LOG_PATH="$(__DARSHAN_LOG_PATH)";
 AM_TESTS_ENVIRONMENT += export DARSHAN_INSTALL_DIR="$(prefix)";
 AM_TESTS_ENVIRONMENT += export USERNAME_ENV="$(USERNAME_ENV)";
 AM_TESTS_ENVIRONMENT += export HAVE_MPI_LARGE_COUNT="$(HAVE_MPI_LARGE_COUNT)";
+AM_TESTS_ENVIRONMENT += export HAVE_OPEN_MPI="$(HAVE_OPEN_MPI)";
 AM_TESTS_ENVIRONMENT += export check_PROGRAMS="$(check_PROGRAMS)";

 TESTS = tst_runs.sh
diff --git a/darshan-runtime/test/tst_runs.sh b/darshan-runtime/test/tst_runs.sh
index 7f73b573b..00a98cb9f 100755
--- a/darshan-runtime/test/tst_runs.sh
+++ b/darshan-runtime/test/tst_runs.sh
@@ -34,12 +34,15 @@ echo "DARSHAN_CONFIG=$DARSHAN_CONFIG"

 $DARSHAN_CONFIG --all

-# run NP number of MPI processes
-# Note when using OpenMPI, setting NP > 2 will fail.
+# run NP number of MPI processes, default 2
 if test "x$NP" = x ; then
    NP=2
 fi

+if test "x$HAVE_OPEN_MPI" = x1 ; then
+   TESTMPIRUN="$TESTMPIRUN --oversubscribe"
+fi
+
 TEST_FILE=./testfile.dat

 # tst_mpi_io.c takes the following command-line options.

From 7212fb998906d351d58c0ba713c9f3fd5d28aba9 Mon Sep 17 00:00:00 2001
From: Wei-keng Liao
Date: Sat, 9 Aug 2025 17:03:47 -0500
Subject: [PATCH 9/9] GitHub CI: test using MPICH 4.3.1 and OpenMPI 5.0.8
---
 .github/workflows/mpich_openmpi.yml | 132 ++++++++++++++++++++++
 1 file changed, 132 insertions(+)
 create mode 100644 .github/workflows/mpich_openmpi.yml

diff --git a/.github/workflows/mpich_openmpi.yml b/.github/workflows/mpich_openmpi.yml
new file mode 100644
index 000000000..58b0cd9ee
--- /dev/null
+++ b/.github/workflows/mpich_openmpi.yml
@@ -0,0 +1,132 @@
+name: Test MPICH and OpenMPI
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+env:
+  MPICH_VERSION: 4.3.1
+  OPENMPI_VERSION: 5.0.8
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        platform: [ubuntu-latest]
+    runs-on: ${{ matrix.platform }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: |
+          sudo apt-get update -y
+      - name: Build MPICH
+        run: |
+          cd ${GITHUB_WORKSPACE}
+          echo "Install MPICH ${MPICH_VERSION} in ${GITHUB_WORKSPACE}/MPICH"
+          rm -rf MPICH ; mkdir MPICH ; cd MPICH
+          wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz
+          gzip -dc mpich-${MPICH_VERSION}.tar.gz | tar -xf -
+          cd mpich-${MPICH_VERSION}
+          ./configure --prefix=${GITHUB_WORKSPACE}/MPICH \
+                      --silent \
+                      --enable-romio \
+                      --with-file-system=ufs \
+                      --with-device=ch3:sock \
+                      --disable-fortran \
+                      CC=gcc
+          make -s LIBTOOLFLAGS=--silent V=1 -j 8 install > qout 2>&1
+          make -s -j 8 distclean >> qout 2>&1
+      - name: Build OPENMPI
+        run: |
+          cd ${GITHUB_WORKSPACE}
+          echo "Install OPENMPI ${OPENMPI_VERSION} in ${GITHUB_WORKSPACE}/OPENMPI"
+          rm -rf OPENMPI ; mkdir OPENMPI ; cd OPENMPI
+          VER_MAJOR=${OPENMPI_VERSION%.*}
+          wget -q https://download.open-mpi.org/release/open-mpi/v${VER_MAJOR}/openmpi-${OPENMPI_VERSION}.tar.gz
+          gzip -dc openmpi-${OPENMPI_VERSION}.tar.gz | tar -xf -
+          cd openmpi-${OPENMPI_VERSION}
+          ./configure --prefix=${GITHUB_WORKSPACE}/OPENMPI \
+                      --silent \
+                      --with-io-romio-flags="--with-file-system=ufs" \
+                      --disable-mpi-cxx --disable-mpi-fortran \
+                      CC=gcc
+          make -s LIBTOOLFLAGS=--silent V=1 -j 8 install > qout 2>&1
+          make -s -j 8 distclean >> qout 2>&1
+      - name: Initialize Darshan
+        run: |
+          git submodule update --init
+          autoreconf -i
+      - name: Build Darshan using MPICH
+        run: |
+          export PATH="${GITHUB_WORKSPACE}/MPICH/bin:${PATH}"
+          export DARSHAN_ROOT="${GITHUB_WORKSPACE}"
+          export DARSHAN_LOG_PATH="${GITHUB_WORKSPACE}/LOGS"
+          export DARSHAN_INSTALL="${GITHUB_WORKSPACE}/INSTALL"
+          export DARSHAN_BUILD="${GITHUB_WORKSPACE}/BUILD"
+          rm -rf ${DARSHAN_LOG_PATH} ${DARSHAN_BUILD} ${DARSHAN_INSTALL}
+          mkdir -p ${DARSHAN_LOG_PATH}
${DARSHAN_BUILD} + cd ${DARSHAN_BUILD} + $DARSHAN_ROOT/configure --prefix=${DARSHAN_INSTALL} \ + --with-log-path=${DARSHAN_LOG_PATH} \ + --with-jobid-env=NONE \ + CC=mpicc RUNTIME_CC=mpicc UTIL_CC=gcc + make -s LIBTOOLFLAGS=--silent V=1 -j8 + make -s install + - name: make check (MPICH) + run: | + export PATH="${GITHUB_WORKSPACE}/MPICH/bin:${PATH}" + cd ${GITHUB_WORKSPACE}/BUILD + make check + - name: Print test log files (MPICH) + if: ${{ always() }} + run: | + cat ${GITHUB_WORKSPACE}/BUILD/darshan-runtime/test/tst_runs.log + - name: make check (MPICH) running 4 processes + run: | + export PATH="${GITHUB_WORKSPACE}/MPICH/bin:${PATH}" + cd ${GITHUB_WORKSPACE}/BUILD + make check NP=4 + - name: Print test log files (MPICH) running 4 processes + if: ${{ always() }} + run: | + cat ${GITHUB_WORKSPACE}/BUILD/darshan-runtime/test/tst_runs.log + + - name: Build Darshan using OpenMPI + run: | + export PATH="${GITHUB_WORKSPACE}/OPENMPI/bin:${PATH}" + export DARSHAN_ROOT="${GITHUB_WORKSPACE}" + export DARSHAN_LOG_PATH="${GITHUB_WORKSPACE}/LOGS" + export DARSHAN_INSTALL="${GITHUB_WORKSPACE}/INSTALL" + export DARSHAN_BUILD="${GITHUB_WORKSPACE}/BUILD" + rm -rf ${DARSHAN_LOG_PATH} ${DARSHAN_BUILD} ${DARSHAN_INSTALL} + mkdir -p ${DARSHAN_LOG_PATH} ${DARSHAN_BUILD} + cd ${DARSHAN_BUILD} + $DARSHAN_ROOT/configure --prefix=${DARSHAN_INSTALL} \ + --with-log-path=${DARSHAN_LOG_PATH} \ + --with-jobid-env=NONE \ + CC=mpicc RUNTIME_CC=mpicc UTIL_CC=gcc + make -s LIBTOOLFLAGS=--silent V=1 -j8 + make -s install + - name: make check (OpenMPI) + run: | + export PATH="${GITHUB_WORKSPACE}/OPENMPI/bin:${PATH}" + cd ${GITHUB_WORKSPACE}/BUILD + make check + - name: Print test log files (OpenMPI) + if: ${{ always() }} + run: | + cat ${GITHUB_WORKSPACE}/BUILD/darshan-runtime/test/tst_runs.log + - name: make check (OpenMPI) running 4 processes + run: | + export PATH="${GITHUB_WORKSPACE}/OPENMPI/bin:${PATH}" + cd ${GITHUB_WORKSPACE}/BUILD + make check NP=4 + - name: Print test log files (OpenMPI) running 4 processes + if: ${{ always() }} + run: | + cat ${GITHUB_WORKSPACE}/BUILD/darshan-runtime/test/tst_runs.log +