X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/30ba0be4e5902c638615637e69223b4e9db1f555..de88ba6429025fa6d048915b20148bc7a301f07f:/teshsuite/smpi/mpich3-test/io/i_aggregation2.c

diff --git a/teshsuite/smpi/mpich3-test/io/i_aggregation2.c b/teshsuite/smpi/mpich3-test/io/i_aggregation2.c
new file mode 100644
index 0000000000..0647c1b3e2
--- /dev/null
+++ b/teshsuite/smpi/mpich3-test/io/i_aggregation2.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2014 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* Look for regressions in aggregator code. A simpler access pattern than
+ * aggregation1 */
+
+/* Uses nonblocking collective I/O. */
+
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <mpi.h>
+
+#define BUFSIZE 512
+
+static void handle_error(int errcode, const char *str)
+{
+    char msg[MPI_MAX_ERROR_STRING];
+    int resultlen;
+    MPI_Error_string(errcode, msg, &resultlen);
+    fprintf(stderr, "%s: %s\n", str, msg);
+    MPI_Abort(MPI_COMM_WORLD, 1);
+}
+
+int main(int argc, char **argv)
+{
+    MPI_Info info = MPI_INFO_NULL;
+    MPI_File fh;
+    MPI_Offset off = 0;
+    MPI_Status status;
+    int errcode;
+    int i, rank, errs = 0, toterrs, buffer[BUFSIZE], buf2[BUFSIZE];
+    MPI_Request request;
+    const char *filename = NULL;
+
+    filename = (argc > 1) ? argv[1] : "testfile";
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+    MPI_Info_create(&info);
+    MPI_Info_set(info, "romio_cb_write", "enable");
+    MPI_Info_set(info, "cb_nodes", "1");
+
+    for (i = 0; i < BUFSIZE; i++) {
+        buffer[i] = 10000 + rank;
+    }
+    off = rank * sizeof(buffer);
+
+    errcode = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_WRONLY | MPI_MODE_CREATE, info, &fh);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_open");
+    errcode = MPI_File_iwrite_at_all(fh, off, buffer, BUFSIZE, MPI_INT, &request);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_iwrite_at_all");
+    MPI_Wait(&request, &status);
+    errcode = MPI_File_close(&fh);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_close");
+
+    errcode = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_open");
+    errcode = MPI_File_iread_at_all(fh, off, buf2, BUFSIZE, MPI_INT, &request);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_iread_at_all");
+    MPI_Wait(&request, &status);
+    errcode = MPI_File_close(&fh);
+    if (errcode != MPI_SUCCESS)
+        handle_error(errcode, "MPI_File_close");
+
+    for (i = 0; i < BUFSIZE; i++) {
+        if (buf2[i] != 10000 + rank)
+            errs++;
+    }
+    MPI_Allreduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    if (rank == 0) {
+        if (toterrs > 0) {
+            fprintf(stderr, "Found %d errors\n", toterrs);
+        }
+        else {
+            fprintf(stdout, " No Errors\n");
+        }
+    }
+    MPI_Info_free(&info);
+    MPI_Finalize();
+
+    return 0;
+}