diff --git a/benchmarks/WRF-IO/parallel_run.sh b/benchmarks/WRF-IO/parallel_run.sh
index a8c81d4f3..a6cf24d7c 100755
--- a/benchmarks/WRF-IO/parallel_run.sh
+++ b/benchmarks/WRF-IO/parallel_run.sh
@@ -38,46 +38,26 @@ for i in ${check_PROGRAMS} ; do
         if test "$intra_aggr" = 1 ; then
            export PNETCDF_HINTS="${PNETCDF_HINTS};nc_num_aggrs_per_node=2"
         fi
+        # echo "PNETCDF_HINTS=${PNETCDF_HINTS}"
+
         export PNETCDF_SAFE_MODE=$j
         # echo "set PNETCDF_SAFE_MODE ${PNETCDF_SAFE_MODE}"
-        OPTS="-l 100 -w 100 -i ${srcdir}/wrf_header.txt"
-        # echo "${MPIRUN} ./$i -q ${OPTS} ${TESTOUTDIR}/$i.nc"
-        ${MPIRUN} ./$i -q ${OPTS} ${TESTOUTDIR}/$i.nc
+        OPTS="-y 100 -x 100 -i ${srcdir}/wrf_header.txt"
+        OPTS="$OPTS -w ${TESTOUTDIR}/$i.nc -r ${TESTOUTDIR}/$i.nc"
+        # echo "${MPIRUN} ./$i -q ${OPTS}"
+        ${MPIRUN} ./$i -q ${OPTS}
         if test $? = 0 ; then
            echo "PASS: C parallel run on $1 processes --------------- $i"
         fi
+        unset PNETCDF_HINTS

         # echo "--- validating file ${TESTOUTDIR}/$i.nc"
         ${TESTSEQRUN} ${VALIDATOR} -q ${TESTOUTDIR}/$i.nc
         # echo ""
-
-        if test "x${ENABLE_BURST_BUFFER}" = x1 ; then
-           # echo "test burst buffering feature"
-           saved_PNETCDF_HINTS=${PNETCDF_HINTS}
-           export PNETCDF_HINTS="${PNETCDF_HINTS};nc_burst_buf=enable;nc_burst_buf_dirname=${TESTOUTDIR};nc_burst_buf_overwrite=enable"
-           ${MPIRUN} ./$i -q ${OPTS} ${TESTOUTDIR}/$i.bb.nc
-           if test $? = 0 ; then
-              echo "PASS: C parallel run on $1 processes --------------- $i"
-           fi
-           export PNETCDF_HINTS=${saved_PNETCDF_HINTS}
-
-           # echo "--- validating file ${TESTOUTDIR}/$i.bb.nc"
-           ${TESTSEQRUN} ${VALIDATOR} -q ${TESTOUTDIR}/$i.bb.nc
-
-           # echo "--- ncmpidiff $i.nc $i.bb.nc ---"
-           ${MPIRUN} ${NCMPIDIFF} -q ${TESTOUTDIR}/$i.nc ${TESTOUTDIR}/$i.bb.nc
-        fi
-
-        if test "x${ENABLE_NETCDF4}" = x1 ; then
-           # echo "test netCDF-4 feature"
-           ${MPIRUN} ./$i -q ${OPTS} ${TESTOUTDIR}/$i.nc4 4
-           # Validator does not support nc4
-        fi
     done
   done
   rm -f ${OUTDIR}/$i.nc
-  rm -f ${OUTDIR}/$i.bb.nc
  rm -f ${OUTDIR}/$i.nc4
done
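The script above passes PnetCDF hints through the PNETCDF_HINTS environment variable (semicolon-separated key=value pairs). The same hints can also be supplied programmatically through an MPI_Info object at file-create time. Below is a minimal sketch of that alternative; the function name create_with_hints and the file name "example.nc" are placeholders, and it assumes a PnetCDF build that recognizes the nc_num_aggrs_per_node hint.

    #include <mpi.h>
    #include <pnetcdf.h>

    /* Create a file with the intra-node aggregation hint set via MPI_Info,
     * mirroring what parallel_run.sh does with PNETCDF_HINTS. */
    int create_with_hints(MPI_Comm comm, int *ncidp)
    {
        int err;
        MPI_Info info;

        MPI_Info_create(&info);
        /* same hint the script appends to PNETCDF_HINTS */
        MPI_Info_set(info, "nc_num_aggrs_per_node", "2");

        err = ncmpi_create(comm, "example.nc", NC_CLOBBER, info, ncidp);
        MPI_Info_free(&info);
        return err;
    }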
diff --git a/benchmarks/WRF-IO/wrf_io.c b/benchmarks/WRF-IO/wrf_io.c
index c844458e7..8199bd2df 100644
--- a/benchmarks/WRF-IO/wrf_io.c
+++ b/benchmarks/WRF-IO/wrf_io.c
@@ -6,12 +6,12 @@
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  *
- * This program evaluates the file write performance of WRF (Wether Research
- * and Forecast Model, https://github.com/wrf-model/WRF) developed at NCAR.
- * It's data partitioning pattern is a 2D block-block checkerboard pattern,
- * along the longitude and latitude. This benchmark program reads a CDL header
- * file in text format (i.e. a netCDF file obtained from "ncmpidump -h") and
- * creates a new file with the same metadata.
+ * This program evaluates the file write and read performance of WRF (Weather
+ * Research and Forecast Model, https://github.com/wrf-model/WRF) developed at
+ * NCAR. Its data partitioning pattern is a 2D block-block checkerboard
+ * pattern, along the longitude and latitude. This benchmark program reads a
+ * CDL header file in text format (i.e. a netCDF file obtained from "ncmpidump
+ * -h") and creates a new file with the same metadata.
  *
  * The provided CDL file "wrf_header.txt" contains 202 variables. Among them,
  * 30 are 1D variables. 25 are 2D variables, 122 are 3D, and 25 are 4D. Only 3D
@@ -26,25 +26,25 @@
  * % mpicc -O2 wrf_io.c -o wrf_io -lpnetcdf
  *
  * An example of run command:
- * % mpiexec -n 4 ./wrf_io -l 100 -w 100 -n 2 -i wrf_header.txt ./wrf_io.nc
+ * % mpiexec -n 4 ./wrf_io -y 100 -x 100 -n 2 -i wrf_header.txt -w ./wrf_io.nc
  *
  * -----------------------------------------------------------
  * ---- WRF-IO write benchmark ----
- * Output NetCDF file name: ./wrf_io.nc
- * Number of MPI processes: 4
- * MPI processes arranged to 2D: 2 x 2
- * Grid size logitute x latitute: 100 x 100
- * Total number of variables: 202
- * Number of time records: 2
- * Total write amount: 68696794 B
- *                     65.51 MiB
- *                     0.06 GiB
- * Max open-to-close time: 0.0978 sec
- * Max define metadata time: 0.0085 sec
- * Max bput posting time: 0.0148 sec
- * Max wait_all time: 0.0718 sec
- * Write bandwidth: 670.08 MiB/s
- *                  0.65 GiB/s
+ * Output NetCDF file name:        ./wrf_io.nc
+ * Number of MPI processes:        4
+ * MPI processes arranged to 2D:   2 x 2
+ * Grid size longitude x latitude: 100 x 100
+ * Total number of variables:      202
+ * Number of time records:         2
+ * Total write amount:             68696794 B
+ *                                 65.51 MiB
+ *                                 0.06 GiB
+ * Max open-to-close time:         0.0978 sec
+ * Max define metadata time:       0.0085 sec
+ * Max bput posting time:          0.0148 sec
+ * Max wait_all time:              0.0718 sec
+ * Write bandwidth:                670.08 MiB/s
+ *                                 0.65 GiB/s
 * -----------------------------------------------------------
 * MPI-IO hint striping_factor: 0
 * MPI-IO hint striping_unit: 0
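The "bput posting" and "wait_all" rows in the report above correspond to PnetCDF's buffered nonblocking write path, which this benchmark times separately from file open/close and metadata definition. A minimal sketch of that pattern follows; the function and variable names are illustrative, not taken from wrf_io.c.

    #include <pnetcdf.h>

    /* Buffered nonblocking write: attach an internal buffer, post bput
     * requests (the data is copied, so the user buffer may be reused
     * immediately), then flush everything with one collective wait_all. */
    int write_one_record(int ncid, int varid, MPI_Offset start[4],
                         MPI_Offset count[4], float *buf, MPI_Offset bufsize)
    {
        int err, req, status;

        err = ncmpi_buffer_attach(ncid, bufsize);
        if (err != NC_NOERR) return err;

        err = ncmpi_bput_vara_float(ncid, varid, start, count, buf, &req);
        if (err != NC_NOERR) return err;

        err = ncmpi_wait_all(ncid, 1, &req, &status);
        if (err != NC_NOERR) return err;

        return ncmpi_buffer_detach(ncid);
    }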
@@ -90,8 +90,8 @@ typedef struct {
 static
 int construct_vars(int hid,  /* CDL header ID */
                    WRF_VAR *vars,
-                   MPI_Offset *logitute,
-                   MPI_Offset *latitute,
+                   MPI_Offset *longitude,
+                   MPI_Offset *latitude,
                    int psizes[2],
                    MPI_Offset *buf_size)
 {
@@ -101,30 +101,30 @@ int construct_vars(int hid,  /* CDL header ID */
     MPI_Offset dimlen, my_start_y, my_start_x, my_count_y, my_count_x;
     nc_type xtype;

-    /* determine whether to use logitute, latitute set at command line */
+    /* determine whether to use longitude, latitude set at command line */
     err = cdl_hdr_inq_ndims(hid, &ndims);
     CHECK_ERR("cdl_hdr_inq_ndims")

-    if (*logitute <= 0) {
+    if (*longitude <= 0) {
         for (i=0; i<ndims; i++) {
             err = cdl_hdr_inq_dim(hid, i, &name, &dimlen);
             CHECK_ERR("cdl_hdr_inq_dim")
-            if (!strcmp(name, "south_north")) *logitute = dimlen;
+            if (!strcmp(name, "south_north")) *longitude = dimlen;
         }
     }
-    if (*latitute <= 0) {
+    if (*latitude <= 0) {
         for (i=0; i<ndims; i++) {
             err = cdl_hdr_inq_dim(hid, i, &name, &dimlen);
             CHECK_ERR("cdl_hdr_inq_dim")
-            if (!strcmp(name, "west_east")) *latitute = dimlen;
+            if (!strcmp(name, "west_east")) *latitude = dimlen;
         }
     }
@@ -247,9 +247,149 @@
+/*----< inquire_vars() >-----------------------------------------------------*/
+/* Inquire metadata of all variables in the input file and calculate this
+ * rank's subarray access region, using the same 2D block-block partitioning
+ * as the write benchmark.
+ */
+static
+int inquire_vars(int ncid,
+                 WRF_VAR *vars,
+                 int psizes[2],
+                 MPI_Offset longitude,
+                 MPI_Offset latitude,
+                 MPI_Offset *buf_size)
+{
+    char name[NC_MAX_NAME+1], dname0[NC_MAX_NAME+1], dname1[NC_MAX_NAME+1];
+    char dname2[NC_MAX_NAME+1], dname3[NC_MAX_NAME+1];
+    int i, j, err=NC_NOERR, rank, nvars, ndims, dimids[4];
+    int my_rank_y, my_rank_x;
+    MPI_Offset dim0, dim1, dim2, dim3;
+    MPI_Offset my_start_y, my_start_x, my_count_y, my_count_x;
+    nc_type xtype;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+    /* this rank's coordinates in the psizes[0] x psizes[1] process grid */
+    my_rank_y = rank / psizes[1];
+    my_rank_x = rank % psizes[1];
+
+    /* block partitioning along south_north (y) and west_east (x) */
+    my_count_y = longitude / psizes[0];
+    my_start_y = my_count_y * my_rank_y;
+    if (my_rank_y < longitude % psizes[0]) {
+        my_start_y += my_rank_y;
+        my_count_y++;
+    }
+    else
+        my_start_y += longitude % psizes[0];
+
+    my_count_x = latitude / psizes[1];
+    my_start_x = my_count_x * my_rank_x;
+    if (my_rank_x < latitude % psizes[1]) {
+        my_start_x += my_rank_x;
+        my_count_x++;
+    }
+    else
+        my_start_x += latitude % psizes[1];
+
+    err = ncmpi_inq_nvars(ncid, &nvars);
+    CHECK_ERR("ncmpi_inq_nvars")
+
+    *buf_size = 0;
+    for (i=0; i<nvars; i++, vars++) {
+        err = ncmpi_inq_var(ncid, i, name, &xtype, &ndims, dimids, NULL);
+        CHECK_ERR("ncmpi_inq_var")
+
+        if (ndims > 0) {
+            err = ncmpi_inq_dimlen(ncid, dimids[0], &dim0);
+            CHECK_ERR("ncmpi_inq_dimlen")
+            err = ncmpi_inq_dimname(ncid, dimids[0], dname0);
+            CHECK_ERR("ncmpi_inq_dimname")
+        }
+        if (ndims > 1) {
+            err = ncmpi_inq_dimlen(ncid, dimids[1], &dim1);
+            CHECK_ERR("ncmpi_inq_dimlen")
+            err = ncmpi_inq_dimname(ncid, dimids[1], dname1);
+            CHECK_ERR("ncmpi_inq_dimname")
+        }
+        if (ndims > 2) {
+            err = ncmpi_inq_dimlen(ncid, dimids[2], &dim2);
+            CHECK_ERR("ncmpi_inq_dimlen")
+            err = ncmpi_inq_dimname(ncid, dimids[2], dname2);
+            CHECK_ERR("ncmpi_inq_dimname")
+        }
+        if (ndims > 3) {
+            err = ncmpi_inq_dimlen(ncid, dimids[3], &dim3);
+            CHECK_ERR("ncmpi_inq_dimlen")
+            err = ncmpi_inq_dimname(ncid, dimids[3], dname3);
+            CHECK_ERR("ncmpi_inq_dimname")
+        }
+
+        vars->varid  = i;
+        vars->name   = strdup(name);
+        vars->ndims  = ndims;
+        vars->xtype  = xtype;
+        vars->nelems = 0;
+        vars->dimids = (int*) malloc(sizeof(int) * ndims);
+        for (j=0; j<ndims; j++) vars->dimids[j] = dimids[j];
+
+        vars->start[0] = 0;  /* time dimension */
+        vars->count[0] = 1;  /* time dimension */
+
+        /* In WRF, the first dimension is always NC_UNLIMITED */
+        if (ndims == 1)
+            vars->nelems = (rank == 0) ? 1 : 0;
+        else if (ndims == 2) {
+            vars->nelems = (rank == 0) ? dim1 : 0;
+            vars->count[1] = dim1;  /* dimension dim1 is not partitioned */
+            vars->start[1] = 0;     /* dimension dim1 is not partitioned */
+        }
+        else if (ndims == 3) {
+            vars->start[1] = my_start_y;
+            vars->count[1] = my_count_y;
+            vars->start[2] = my_start_x;
+            vars->count[2] = my_count_x;
+            if (!strcmp(dname1, "south_north_stag") && my_rank_y == psizes[0]-1)
+                vars->count[1]++;
+            if (!strcmp(dname2, "west_east_stag") && my_rank_x == psizes[1]-1)
+                vars->count[2]++;
+            vars->nelems = vars->count[1] * vars->count[2];
+        }
+        else if (ndims == 4) {
+            vars->start[1] = 0;     /* this dimension is not partitioned */
+            vars->count[1] = dim1;  /* this dimension is not partitioned */
+            vars->start[2] = my_start_y;
+            vars->count[2] = my_count_y;
+            vars->start[3] = my_start_x;
+            vars->count[3] = my_count_x;
+            if (!strcmp(dname2, "south_north_stag") && my_rank_y == psizes[0]-1)
+                vars->count[2]++;
+            if (!strcmp(dname3, "west_east_stag") && my_rank_x == psizes[1]-1)
+                vars->count[3]++;
+            vars->nelems = dim1 * vars->count[2] * vars->count[3];
+        }
+
+        if (xtype == NC_FLOAT)
+            *buf_size += sizeof(float) * vars->nelems;
+        else if (xtype == NC_INT)
+            *buf_size += sizeof(int) * vars->nelems;
+        else if (xtype == NC_CHAR)
+            *buf_size += vars->nelems;
+    }
+
+err_out:
+    return err;
+}
+
 static
 int def_dims_vars(int ncid,
                   int hid,  /* CDL header ID */
-                  MPI_Offset *logitute,
-                  MPI_Offset *latitute,
+                  MPI_Offset *longitude,
+                  MPI_Offset *latitude,
                   WRF_VAR *vars)
 {
     char *name;
@@ -265,10 +405,10 @@ int def_dims_vars(int ncid,
         CHECK_ERR("cdl_hdr_inq_dim")
         if (debug) printf("\t name %s size %lld\n",name, size);

-        if      (!strcmp(name, "south_north"))      size = *logitute;
-        else if (!strcmp(name, "south_north_stag")) size = *logitute + 1;
-        else if (!strcmp(name, "west_east"))        size = *latitute;
-        else if (!strcmp(name, "west_east_stag"))   size = *latitute + 1;
+        if      (!strcmp(name, "south_north"))      size = *longitude;
+        else if (!strcmp(name, "south_north_stag")) size = *longitude + 1;
+        else if (!strcmp(name, "west_east"))        size = *latitude;
+        else if (!strcmp(name, "west_east_stag"))   size = *latitude + 1;

         err = ncmpi_def_dim(ncid, name, size, &dimid);
         CHECK_ERR("ncmpi_def_dim")
@@ -337,13 +477,13 @@ int def_dims_vars(int ncid,
 }

 static
-int wrf_io_benchmark(char *out_file,
-                     int hid,  /* CDL header ID */
-                     int psizes[2],
-                     MPI_Offset logitute,
-                     MPI_Offset latitute,
-                     int ntimes,
-                     MPI_Info info)
+int wrf_w_benchmark(char *out_file,
+                    int hid,  /* CDL header ID */
+                    int psizes[2],
+                    MPI_Offset longitude,
+                    MPI_Offset latitude,
+                    int ntimes,
+                    MPI_Info info)
 {
     int i, j, err=NC_NOERR, nprocs, rank;
     int cmode, ncid, nvars;
@@ -362,7 +502,7 @@ int wrf_io_benchmark(char *out_file,
     vars = (WRF_VAR*) malloc(sizeof(WRF_VAR) * nvars);

     /* populate variable metadata */
-    err = construct_vars(hid, vars, &logitute, &latitute, psizes, &buf_size);
+    err = construct_vars(hid, vars, &longitude, &latitude, psizes, &buf_size);
     CHECK_ERR("construct_vars")

     if (debug) {
@@ -418,7 +558,7 @@ int wrf_io_benchmark(char *out_file,
     CHECK_ERR("ncmpi_create")

     /* define dimension, variables, and attributes */
-    err = def_dims_vars(ncid, hid, &logitute, &latitute, vars);
+    err = def_dims_vars(ncid, hid, &longitude, &latitude, vars);
     CHECK_ERR("def_dims_vars")

     /* exit metadata define mode */
@@ -467,7 +607,7 @@ int wrf_io_benchmark(char *out_file,
         start_t = end_t;

         if (debug && rank == 0) {
-            printf("Flush write requests iteration j=%d\n",j);
+            printf("Flush write requests at end of iteration j=%d\n",j);
             fflush(stdout);
         }
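Both construct_vars() (write path) and the new inquire_vars() (read path) derive each rank's subarray from the same 2D block-block (checkerboard) decomposition: ranks form a psizes[0] x psizes[1] grid and each owns a contiguous block along the south_north (y) and west_east (x) dimensions. The sketch below illustrates the arithmetic; the function name block_partition and the remainder-distribution convention are assumptions, not necessarily the exact code in wrf_io.c.

    #include <mpi.h>

    /* Compute one rank's block along y and x; ranks with coordinate
     * smaller than the remainder receive one extra element. */
    void block_partition(int rank, int psizes[2],
                         MPI_Offset len_y, MPI_Offset len_x,
                         MPI_Offset *start_y, MPI_Offset *count_y,
                         MPI_Offset *start_x, MPI_Offset *count_x)
    {
        int ry = rank / psizes[1];  /* my coordinate along y */
        int rx = rank % psizes[1];  /* my coordinate along x */
        MPI_Offset rem_y = len_y % psizes[0];
        MPI_Offset rem_x = len_x % psizes[1];

        *count_y = len_y / psizes[0];
        *start_y = *count_y * ry + ((ry < rem_y) ? ry : rem_y);
        if (ry < rem_y) (*count_y)++;

        *count_x = len_x / psizes[1];
        *start_x = *count_x * rx + ((rx < rem_x) ? rx : rem_x);
        if (rx < rem_x) (*count_x)++;
    }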
@@ -504,22 +644,22 @@ int wrf_io_benchmark(char *out_file,

         printf("-----------------------------------------------------------\n");
         printf("---- WRF-IO write benchmark ----\n");
-        printf("Output NetCDF file name: %s\n", out_file);
-        printf("Number of MPI processes: %d\n", nprocs);
-        printf("MPI processes arranged to 2D: %d x %d\n", psizes[0], psizes[1]);
-        printf("Grid size logitute x latitute: %lld x %lld\n",logitute,latitute);
-        printf("Total number of variables: %d\n", nvars);
-        printf("Number of time records: %d\n",ntimes);
-        printf("Total write amount: %lld B\n", sum_w_size);
-        printf(" %.2f MiB\n", (float)sum_w_size/1048576);
-        printf(" %.2f GiB\n", (float)sum_w_size/1073741824);
+        printf("Output NetCDF file name:        %s\n", out_file);
+        printf("Number of MPI processes:        %d\n", nprocs);
+        printf("MPI processes arranged to 2D :  %d x %d\n", psizes[0], psizes[1]);
+        printf("Grid size longitude x latitude: %lld x %lld\n",longitude,latitude);
+        printf("Total number of variables:      %d\n", nvars);
+        printf("Number of time records:         %d\n",ntimes);
+        printf("Total write amount:             %lld B\n", sum_w_size);
+        printf("                                %.2f MiB\n", (float)sum_w_size/1048576);
+        printf("                                %.2f GiB\n", (float)sum_w_size/1073741824);
         double bw = (double)sum_w_size / 1048576;
-        printf("Max open-to-close time: %.4f sec\n", max_t[0]);
-        printf("Max define metadata time: %.4f sec\n", max_t[1]);
-        printf("Max bput posting time: %.4f sec\n", max_t[2]);
-        printf("Max wait_all time: %.4f sec\n", max_t[3]);
-        printf("Write bandwidth: %.2f MiB/s\n", bw/max_t[0]);
-        printf(" %.2f GiB/s\n", bw/1024.0/max_t[0]);
+        printf("Max open-to-close time:         %.4f sec\n", max_t[0]);
+        printf("Max define metadata time:       %.4f sec\n", max_t[1]);
+        printf("Max bput posting time:          %.4f sec\n", max_t[2]);
+        printf("Max wait_all time:              %.4f sec\n", max_t[3]);
+        printf("Write bandwidth:                %.2f MiB/s\n", bw/max_t[0]);
+        printf("                                %.2f GiB/s\n", bw/1024.0/max_t[0]);
         printf("-----------------------------------------------------------\n");
         MPI_Info_get(info_used, "striping_factor", MPI_MAX_INFO_VAL, value, &flag);
         printf("MPI-IO hint striping_factor: %s\n", value);
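The figures printed here are global: per-rank timers are reduced with MPI_MAX, per-rank write amounts with MPI_SUM, and bandwidth is total bytes divided by the slowest rank's open-to-close time. A condensed sketch of that reduction, with hypothetical function and argument names:

    #include <stdio.h>
    #include <mpi.h>

    /* Reduce per-rank measurements and print aggregate write bandwidth. */
    void report_bandwidth(double timing[4], MPI_Offset w_size, int rank)
    {
        double max_t[4];
        MPI_Offset sum_w_size;

        MPI_Reduce(timing, max_t, 4, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
        MPI_Reduce(&w_size, &sum_w_size, 1, MPI_OFFSET, MPI_SUM, 0,
                   MPI_COMM_WORLD);

        if (rank == 0) {
            double bw = (double)sum_w_size / 1048576;  /* bytes -> MiB */
            printf("Write bandwidth: %.2f MiB/s\n", bw / max_t[0]);
        }
    }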
@@ -574,8 +714,240 @@ int wrf_io_benchmark(char *out_file,
 }

 static
-int parse_str(char *in_str,
-              int **int_arr)
+int wrf_r_benchmark(char *in_file,
+                    int psizes[2],
+                    int ntimes,
+                    MPI_Info info)
+{
+    int i, j, err=NC_NOERR, nprocs, rank, ncid, nvars, dimid;
+    double timing[4], max_t[4];
+    MPI_Offset buf_size, r_size, sum_r_size, longitude, latitude;
+    MPI_Info info_used;
+    WRF_VAR *vars=NULL;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+
+    /* open input file */
+    err = ncmpi_open(MPI_COMM_WORLD, in_file, NC_NOWRITE, info, &ncid);
+    CHECK_ERR("ncmpi_open")
+
+    /* start the timer */
+    MPI_Barrier(MPI_COMM_WORLD);
+    timing[0] = MPI_Wtime();
+
+    err = ncmpi_inq_dimid(ncid, "south_north", &dimid);
+    CHECK_ERR("ncmpi_inq_dimid")
+    err = ncmpi_inq_dimlen(ncid, dimid, &longitude);
+    CHECK_ERR("ncmpi_inq_dimlen")
+    err = ncmpi_inq_dimid(ncid, "west_east", &dimid);
+    CHECK_ERR("ncmpi_inq_dimid")
+    err = ncmpi_inq_dimlen(ncid, dimid, &latitude);
+    CHECK_ERR("ncmpi_inq_dimlen")
+
+    if (debug && rank == 0)
+        printf("%s at %d: longitude=%lld latitude=%lld\n",__func__,__LINE__,
+               longitude,latitude);
+
+    err = ncmpi_inq_nvars(ncid, &nvars);
+    CHECK_ERR("ncmpi_inq_nvars")
+
+    vars = (WRF_VAR*) malloc(sizeof(WRF_VAR) * nvars);
+
+    /* populate variable metadata */
+    err = inquire_vars(ncid, vars, psizes, longitude, latitude, &buf_size);
+    CHECK_ERR("inquire_vars")
+
+    if (debug) {
+        printf("%2d: buf_size %lld\n", rank, buf_size);
+        fflush(stdout);
+    }
+
+    /* allocate and initialize read buffers */
+    MPI_Offset mem_alloc;
+    if (debug) mem_alloc = 0;
+
+    for (i=0; i<nvars; i++) {
+        vars[i].buf = NULL;
+        if (vars[i].nelems == 0) continue;
+        if (vars[i].xtype == NC_FLOAT)
+            vars[i].buf = malloc(sizeof(float) * vars[i].nelems);
+        else if (vars[i].xtype == NC_INT)
+            vars[i].buf = malloc(sizeof(int) * vars[i].nelems);
+        else /* NC_CHAR */
+            vars[i].buf = malloc(vars[i].nelems);
+        if (debug) mem_alloc += vars[i].nelems;
+    }
+
+    /* post nonblocking read requests, one time record at a time */
+    r_size = 0;
+    timing[1] = MPI_Wtime();
+    for (j=0; j<ntimes; j++) {
+        int req;
+        for (i=0; i<nvars; i++) {
+            if (vars[i].nelems == 0) continue;
+            vars[i].start[0] = j;
+            if (vars[i].xtype == NC_FLOAT) {
+                err = ncmpi_iget_vara_float(ncid, vars[i].varid,
+                                            vars[i].start, vars[i].count,
+                                            (float*)vars[i].buf, &req);
+                r_size += sizeof(float) * vars[i].nelems;
+            }
+            else if (vars[i].xtype == NC_INT) {
+                err = ncmpi_iget_vara_int(ncid, vars[i].varid,
+                                          vars[i].start, vars[i].count,
+                                          (int*)vars[i].buf, &req);
+                r_size += sizeof(int) * vars[i].nelems;
+            }
+            else { /* NC_CHAR */
+                err = ncmpi_iget_vara_text(ncid, vars[i].varid,
+                                           vars[i].start, vars[i].count,
+                                           (char*)vars[i].buf, &req);
+                r_size += vars[i].nelems;
+            }
+            CHECK_ERR("ncmpi_iget_vara")
+        }
+        /* flush all pending read requests */
+        err = ncmpi_wait_all(ncid, NC_REQ_ALL, NULL, NULL);
+        CHECK_ERR("ncmpi_wait_all")
+    }
+    timing[1] = MPI_Wtime() - timing[1];
+
+    err = ncmpi_inq_file_info(ncid, &info_used);
+    CHECK_ERR("ncmpi_inq_file_info")
+
+    err = ncmpi_close(ncid);
+    CHECK_ERR("ncmpi_close")
+    timing[0] = MPI_Wtime() - timing[0];
+
+    if (debug) {
+        printf("%2d: mem_alloc %lld\n", rank, mem_alloc);
+        fflush(stdout);
+    }
+
+    /* report timing and read bandwidth */
+    MPI_Reduce(&r_size, &sum_r_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
+    MPI_Reduce(timing, max_t, 2, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
+    if (verbose && rank == 0) {
+        double bw = (double)sum_r_size / 1048576;
+        printf("-----------------------------------------------------------\n");
+        printf("---- WRF-IO read benchmark ----\n");
+        printf("Input NetCDF file name:         %s\n", in_file);
+        printf("Number of MPI processes:        %d\n", nprocs);
+        printf("Total read amount:              %lld B\n", sum_r_size);
+        printf("Max open-to-close time:         %.4f sec\n", max_t[0]);
+        printf("Max iget and wait_all time:     %.4f sec\n", max_t[1]);
+        printf("Read bandwidth:                 %.2f MiB/s\n", bw/max_t[0]);
+        printf("-----------------------------------------------------------\n");
+    }
+    MPI_Info_free(&info_used);
+
+err_out:
+    if (vars != NULL) {
+        for (i=0; i<nvars; i++) {
+            if (vars[i].nelems > 0) free(vars[i].buf);
+            if (vars[i].name != NULL) free(vars[i].name);
+            if (vars[i].dimids != NULL) free(vars[i].dimids);
+        }
+        free(vars);
+    }
+    if (err != NC_NOERR) return err;
+
+    /* check if there is any PnetCDF internal malloc residue */
+    MPI_Offset malloc_size, sum_size;
+    err = ncmpi_inq_malloc_size(&malloc_size);
+    if (err == NC_ENOTENABLED)
+        /* --enable-profiling is not set at configure */
+        return NC_NOERR;
+    else if (err == NC_NOERR) {
+        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0,
+                   MPI_COMM_WORLD);
+        if (rank == 0 && sum_size > 0)
+            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
+                   sum_size);
+        if (malloc_size > 0) ncmpi_inq_malloc_list();
+    }
+
+    /* report the PnetCDF internal heap memory allocation high water mark */
+    err = ncmpi_inq_malloc_max_size(&malloc_size);
+    if (err == NC_NOERR) {
+        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_MAX, 0,
+                   MPI_COMM_WORLD);
+        if (verbose && rank == 0)
+            printf("Max heap memory allocated by PnetCDF internally is %.2f MiB\n\n",
+                   (float)sum_size/1048576);
+    }
+    fflush(stdout);
+
+    return err;
+}
+
+/*----< parse_str() >--------------------------------------------------------*/
+/* This subroutine parses an input string, in_str, into substring tokens,
+ * separated by commas, and returns the number of substrings.
+ */
+static
+int parse_str(char *in_str,
+              char ***str_arr)
 {
     char *token, *str_dup;
     int nelems=0;
@@ -596,16 +968,16 @@ int parse_str(char *in_str,

     free(str_dup);

-    /* allocate int_arr */
-    *int_arr = (int*) malloc(sizeof(int) * nelems);
+    /* allocate str_arr */
+    *str_arr = (char**) malloc(sizeof(char*) * nelems);

-    /* populate int_arr[] */
+    /* populate str_arr[] */
     str_dup = strdup(in_str);
     token = strtok(str_dup, ",");
-    (*int_arr)[0] = atoi(token);
+    (*str_arr)[0] = strdup(token);
     nelems = 1;
     while ((token = strtok(NULL, ",")) != NULL)
-        (*int_arr)[nelems++] = atoi(token);
+        (*str_arr)[nelems++] = strdup(token);

     free(str_dup);
     return nelems;
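With the new signature, the comma-separated list given to -w or -r becomes an array of strdup'ed strings, so the caller must free each element as well as the array itself. A hypothetical caller is sketched below; it assumes parse_str is visible to it (in wrf_io.c the function is static, so this is for illustration only).

    #include <stdio.h>
    #include <stdlib.h>

    extern int parse_str(char *in_str, char ***str_arr);

    void run_list(void)
    {
        char list[] = "a.nc,b.nc,c.nc";   /* as given to -w or -r */
        char **fname;
        int i, nfiles;

        nfiles = parse_str(list, &fname);  /* nfiles == 3 */
        for (i=0; i<nfiles; i++) {
            printf("benchmark file: %s\n", fname[i]);
            free(fname[i]);                /* each token was strdup'ed */
        }
        if (nfiles > 0) free(fname);
    }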
@@ -615,16 +987,16 @@ static
 void usage(char *argv0)
 {
     char *help =
-    "Usage: %s [OPTIONS] -i cdf_file output_file\n"
+    "Usage: %s [OPTIONS]\n"
     "       [-h] print this help\n"
     "       [-q] quiet mode\n"
     "       [-d] debug mode\n"
-    "       [-l num] logitute of global 2D grid\n"
-    "       [-w num] latitute of global 2D grid\n"
+    "       [-r file1,file2,...] benchmark read performance\n"
+    "       [-w file1,file2,...] benchmark write performance\n"
+    "       [-y num] longitude of global 2D grid\n"
+    "       [-x num] latitude of global 2D grid\n"
     "       [-n num] number of time steps\n"
-    "       [-r str] a list of cb_nodes separated by commas\n"
-    "       -i cdf_file: input text file containing CDL header \n"
-    "       output_file: output netCDF file name\n";
+    "       [-i cdf_file] input text file containing CDL header\n";
     fprintf(stderr, help, argv0);
 }

@@ -632,11 +1004,10 @@
 int main(int argc, char** argv)
 {
     extern int optind;
     extern char *optarg;
-    char out_file[1024], *cdl_file, *cb_nodes_str;
-    int i, j, err, nerrs=0, nprocs, rank, ntimes, psizes[2], hid;
-    int num_cb_nodes, num_intra_nodes, *cb_nodes;
-    int nc_num_aggrs_per_node[]={0};
-    MPI_Offset logitute, latitute;
+    char *out_files, *in_files, *cdl_file, **fname;
+    int i, err, nerrs=0, nprocs, rank, ntimes, psizes[2], hid;
+    int nfiles, do_read, do_write;
+    MPI_Offset longitude, latitude;
     MPI_Info info=MPI_INFO_NULL;

     MPI_Init(&argc, &argv);
@@ -644,26 +1015,32 @@ int main(int argc, char** argv)
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

-    verbose= 1;
-    debug = 0;
-    ntimes = 1;
-    cdl_file = NULL;
-    logitute = -1; /* default to use west_east from cdl file */
-    latitute = -1; /* default to use south_north from cdl file */
-    cb_nodes = NULL;
-    cb_nodes_str = NULL;
-
-    while ((i = getopt(argc, argv, "hqdl:w:n:r:i:")) != EOF)
+    verbose   = 1;
+    debug     = 0;
+    do_read   = 0;
+    do_write  = 0;
+    ntimes    = 1;
+    cdl_file  = NULL;
+    in_files  = NULL;
+    out_files = NULL;
+    longitude = -1; /* default to use west_east from cdl file */
+    latitude  = -1; /* default to use south_north from cdl file */
+
+    while ((i = getopt(argc, argv, "hqdr:w:y:x:n:c:i:")) != EOF)
         switch(i) {
             case 'q': verbose = 0;
                       break;
            case 'd': debug = 1;
                      break;
-           case 'l': logitute = atoll(optarg);
+           case 'r': do_read = 1;
+                     in_files = strdup(optarg);
                      break;
-           case 'w': latitute = atoll(optarg);
+           case 'w': do_write = 1;
+                     out_files = strdup(optarg);
                      break;
-           case 'r': cb_nodes_str = strdup(optarg);
+           case 'y': longitude = atoll(optarg);
+                     break;
+           case 'x': latitude = atoll(optarg);
                      break;
            case 'n': ntimes = atoi(optarg);
                      break;
@@ -674,77 +1051,84 @@ int main(int argc, char** argv)
            MPI_Finalize();
            return 1;
     }
-    if (argv[optind] == NULL) strcpy(out_file, "testfile.nc");
-    else                      snprintf(out_file, 1024, "%s", argv[optind]);

-    /* input CDL file is required */
-    if (cdl_file == NULL) {
-        if (rank == 0) usage(argv[0]);
+    /* check read or write benchmark */
+    if (do_read == 0 && do_write == 0) {
+        if (rank == 0) {
+            fprintf(stderr, "Error: must select read or write benchmark by setting -r and/or -w\n");
+            usage(argv[0]);
+        }
         MPI_Finalize();
         return 1;
     }

-    /* parse CDL header file */
-    err = cdl_hdr_open(cdl_file, &hid);
-    free(cdl_file);
-    if (err != NC_NOERR) goto err_out;
+    /* input CDL file is required for write benchmark */
+    if (do_write && cdl_file == NULL) {
+        if (rank == 0) {
+            fprintf(stderr, "Error: write benchmark requires input CDL file\n");
+            usage(argv[0]);
+        }
+        MPI_Finalize();
+        return 1;
+    }

     /* set up the 2D block-block data partitioning pattern */
     psizes[0] = psizes[1] = 0;
     MPI_Dims_create(nprocs, 2, psizes);

-    if (debug && rank == 0) {
-        printf("logitute=%lld latitute=%lld psizes=%d x %d\n",
-               logitute,latitute,psizes[0],psizes[1]);
-        fflush(stdout);
-    }
+    if (do_write) {
+        /* parse CDL header file */
+        err = cdl_hdr_open(cdl_file, &hid);
+        free(cdl_file);
+        if (err != NC_NOERR) goto err_out;

-    if (cb_nodes_str != NULL) {
-        num_cb_nodes = parse_str(cb_nodes_str, &cb_nodes);
-        free(cb_nodes_str);
-    }
-    else
-        num_cb_nodes = 1;
+        if (debug && rank == 0) {
+            printf("longitude=%lld latitude=%lld psizes=%d x %d\n",
+                   longitude,latitude,psizes[0],psizes[1]);
+            fflush(stdout);
+        }

-    num_intra_nodes = sizeof(nc_num_aggrs_per_node) / sizeof(int);
+        /* Example of out_files: "0.nc,1.nc,2.nc", i.e. 3 output files */
+        nfiles = parse_str(out_files, &fname);

-    /* set PnetCDF I/O hints */
-    MPI_Info_create(&info);
+        for (i=0; i<nfiles; i++) {
+            err = wrf_w_benchmark(fname[i], hid, psizes, longitude, latitude,
+                                  ntimes, info);
+            if (err != NC_NOERR) nerrs++;
+            free(fname[i]);
+        }
+        if (nfiles > 0) free(fname);
+    }

-    for (i=0; i<num_cb_nodes; i++) {
-        for (j=0; j<num_intra_nodes; j++) {
-            if (debug && rank == 0) {
-                printf("Info cb_nodes set to %d nc_num_aggrs_per_node set to=%d\n",
-                       cb_nodes[i],nc_num_aggrs_per_node[j]);
-                fflush(stdout);
-            }
-            err = wrf_io_benchmark(out_file, hid, psizes, logitute, latitute,
-                                   ntimes, info);
-            if (err != NC_NOERR) nerrs++;
-        }
+    if (do_read) {
+        /* Example of in_files: "0.nc,1.nc,2.nc", i.e. 3 input files */
+        nfiles = parse_str(in_files, &fname);
+        for (i=0; i<nfiles; i++) {
+            err = wrf_r_benchmark(fname[i], psizes, ntimes, info);
+            if (err != NC_NOERR) nerrs++;
+            free(fname[i]);
+        }
+        if (nfiles > 0) free(fname);
     }
-    MPI_Info_free(&info);
-    if (cb_nodes != NULL) free(cb_nodes);

 err_out:
     cdl_hdr_close(hid);
+    if (out_files != NULL) free(out_files);
+    if (in_files  != NULL) free(in_files);
+    if (info != MPI_INFO_NULL) MPI_Info_free(&info);
     MPI_Finalize();
     return (nerrs > 0);
diff --git a/sneak_peek.md b/sneak_peek.md
index 0113b4459..6004b7630 100644
--- a/sneak_peek.md
+++ b/sneak_peek.md
@@ -84,10 +84,11 @@ This is essentially a placeholder for the next release note ...
 * New programs for I/O benchmarks
   + WRF-IO contains an extraction of the I/O kernel of WRF (Wether Research
     and Forecast Model, a weather prediction computer simulation program
-    developed at NCAR) that can be used to evaluate the file write performance
+    developed at NCAR) that can be used to evaluate the file I/O performance
     of WRF. It's data partitioning pattern is a 2D block-block checkerboard
     pattern, along the longitude and latitude.
-    See [PR #165](https://github.com/Parallel-NetCDF/PnetCDF/pull/165).
+    See [PR #165](https://github.com/Parallel-NetCDF/PnetCDF/pull/165)
+    and [PR #181](https://github.com/Parallel-NetCDF/PnetCDF/pull/181).

 * New test program
   + test/cdf/tst_cdl_hdr_parser.c tests the new CDL header APIs.