From: Augustin Degomme Date: Thu, 4 Apr 2019 21:42:38 +0000 (+0200) Subject: test more extensively error returns for collectives. X-Git-Tag: v3.22.2~171 X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/59a1dfe76a80ea4facc6231d05b861e6966954d2 test more extensively error returns for collectives. --- diff --git a/src/smpi/bindings/smpi_pmpi_coll.cpp b/src/smpi/bindings/smpi_pmpi_coll.cpp index 348b2dab2b..99eea0e2cd 100644 --- a/src/smpi/bindings/smpi_pmpi_coll.cpp +++ b/src/smpi/bindings/smpi_pmpi_coll.cpp @@ -15,11 +15,6 @@ XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi); /* PMPI User level calls */ -int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) -{ - return PMPI_Ibcast(buf, count, datatype, root, comm, MPI_REQUEST_IGNORED); -} - int PMPI_Barrier(MPI_Comm comm) { return PMPI_Ibarrier(comm, MPI_REQUEST_IGNORED); @@ -48,22 +43,28 @@ int PMPI_Ibarrier(MPI_Comm comm, MPI_Request *request) return retval; } +int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) +{ + return PMPI_Ibcast(buf, count, datatype, root, comm, MPI_REQUEST_IGNORED); +} + int PMPI_Ibcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm, MPI_Request* request) { - int retval = 0; - smpi_bench_end(); if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } if (buf == nullptr) { + return MPI_ERR_BUFFER; } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) { - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (count < 0){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { + smpi_bench_end(); int rank = simgrid::s4u::this_actor::get_pid(); TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Bcast":"PMPI_Ibcast", new simgrid::instr::CollTIData(request==MPI_REQUEST_IGNORED?"bcast":"ibcast", root, -1.0, @@ -78,12 +79,10 @@ int PMPI_Ibcast(void *buf, int count, MPI_Datatype datatype, if(request!=MPI_REQUEST_IGNORED) *request = MPI_REQUEST_NULL; } - retval = MPI_SUCCESS; - TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - smpi_bench_begin(); - return retval; } int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype, @@ -94,23 +93,21 @@ int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbu int PMPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } else if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) { + return MPI_ERR_BUFFER; } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { - + smpi_bench_end(); char* sendtmpbuf = static_cast(sendbuf); int 
sendtmpcount = sendcount; MPI_Datatype sendtmptype = sendtype; @@ -131,12 +128,10 @@ int PMPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvb else simgrid::smpi::Colls::igather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm, request); - retval = MPI_SUCCESS; TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, @@ -147,24 +142,28 @@ int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recv int PMPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } else if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) { + return MPI_ERR_BUFFER; } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if ((comm->rank() == root) && (recvcounts == nullptr || displs == nullptr)) { - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { + for (int i = 0; i < comm->size(); i++){ + if((comm->rank() == root) && (recvcounts[i]<0)) + return MPI_ERR_COUNT; + } + + smpi_bench_end(); char* sendtmpbuf = static_cast(sendbuf); int sendtmpcount = sendcount; MPI_Datatype sendtmptype = sendtype; @@ -189,14 +188,13 @@ int PMPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *rec dt_size_recv, trace_recvcounts, simgrid::smpi::Datatype::encode(sendtmptype), simgrid::smpi::Datatype::encode(recvtype))); if(request == MPI_REQUEST_IGNORED) - retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm); + simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm); else - retval = simgrid::smpi::Colls::igatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm, request); + simgrid::smpi::Colls::igatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm, request); TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, @@ -213,6 +211,8 @@ int PMPI_Iallgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; + } else if ((sendbuf == nullptr) || (recvbuf == nullptr)){ + retval = MPI_ERR_BUFFER; } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)){ retval = MPI_ERR_TYPE; @@ -252,22 +252,25 @@ int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int PMPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request* request) { 
- int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } else if ((sendbuf == nullptr) || (recvbuf == nullptr)){ + return MPI_ERR_BUFFER; } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) { - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if (recvcounts == nullptr || displs == nullptr) { - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { + for (int i = 0; i < comm->size(); i++){ // copy data to avoid bad free + if (recvcounts[i] < 0) + return MPI_ERR_COUNT; + } + smpi_bench_end(); if(sendbuf == MPI_IN_PLACE) { sendbuf=static_cast(recvbuf)+recvtype->get_extent()*displs[comm->rank()]; sendcount=recvcounts[comm->rank()]; @@ -277,8 +280,9 @@ int PMPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size(); std::vector* trace_recvcounts = new std::vector; - for (int i = 0; i < comm->size(); i++) // copy data to avoid bad free + for (int i = 0; i < comm->size(); i++){ // copy data to avoid bad free trace_recvcounts->push_back(recvcounts[i] * dt_size_recv); + } TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Allgatherv":"PMPI_Iallgatherv", new simgrid::instr::VarCollTIData( @@ -289,12 +293,10 @@ int PMPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); else simgrid::smpi::Colls::iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request); - retval = MPI_SUCCESS; TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, @@ -305,27 +307,23 @@ int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, int PMPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request* request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL || not sendtype->is_valid())) || ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL || not recvtype->is_valid()))) { - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (((comm->rank() == root) && (sendcount < 0)) || ((recvbuf != MPI_IN_PLACE) && (recvcount < 0))) { - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if ((sendbuf == recvbuf) || - ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){ - retval = MPI_ERR_BUFFER; + ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr)) || (recvbuf == nullptr)){ + return MPI_ERR_BUFFER; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { - + smpi_bench_end(); if (recvbuf == MPI_IN_PLACE) { recvtype = sendtype; recvcount = sendcount; @@ -343,12 +341,10 @@ int PMPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); else 
simgrid::smpi::Colls::iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request); - retval = MPI_SUCCESS; TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, @@ -359,30 +355,32 @@ int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, int PMPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; } else if (sendcounts == nullptr || displs == nullptr) { - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) || ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) { - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else if (recvbuf != MPI_IN_PLACE && recvcount < 0){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else { - if (recvbuf == MPI_IN_PLACE) { + if (comm->rank() == root){ + if(recvbuf == MPI_IN_PLACE) { recvtype = sendtype; - if(sendcounts[comm->rank()]<0) - return MPI_ERR_COUNT; recvcount = sendcounts[comm->rank()]; + } + for (int i = 0; i < comm->size(); i++){ + if(sendcounts[i]<0) + return MPI_ERR_COUNT; + } } + smpi_bench_end(); + int rank = simgrid::s4u::this_actor::get_pid(); int dt_size_send = sendtype->is_replayable() ? 1 : sendtype->size(); @@ -390,9 +388,7 @@ int PMPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs, if (comm->rank() == root) { for (int i = 0; i < comm->size(); i++){ // copy data to avoid bad free trace_sendcounts->push_back(sendcounts[i] * dt_size_send); - if(sendcounts[i]<0) - return MPI_ERR_COUNT; - } + } } TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Scatterv":"PMPI_Iscatterv", @@ -401,15 +397,14 @@ int PMPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs, recvtype->is_replayable() ? 
recvcount : recvcount * recvtype->size(), nullptr, simgrid::smpi::Datatype::encode(sendtype), simgrid::smpi::Datatype::encode(recvtype))); if(request == MPI_REQUEST_IGNORED) - retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); + simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); else - retval = simgrid::smpi::Colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request); - + simgrid::smpi::Colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request); TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - smpi_bench_begin(); - return retval; } int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) @@ -419,23 +414,22 @@ int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, int PMPI_Ireduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Request* request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) { + return MPI_ERR_BUFFER; } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()){ - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (op == MPI_OP_NULL) { - retval = MPI_ERR_OP; + return MPI_ERR_OP; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else if (root < 0 || root >= comm->size()){ - retval = MPI_ERR_ROOT; + return MPI_ERR_ROOT; } else if (count < 0){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else { + smpi_bench_end(); int rank = simgrid::s4u::this_actor::get_pid(); TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED ? 
"PMPI_Reduce":"PMPI_Ireduce", @@ -447,13 +441,10 @@ int PMPI_Ireduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, else simgrid::smpi::Colls::ireduce(sendbuf, recvbuf, count, datatype, op, root, comm, request); - - retval = MPI_SUCCESS; TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){ @@ -481,21 +472,20 @@ int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp int PMPI_Iallreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request) { - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; + return MPI_ERR_COMM; + } if ((sendbuf == nullptr) || (recvbuf == nullptr)) { + return MPI_ERR_BUFFER; } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) { - retval = MPI_ERR_TYPE; + return MPI_ERR_TYPE; } else if (count < 0){ - retval = MPI_ERR_COUNT; + return MPI_ERR_COUNT; } else if (op == MPI_OP_NULL) { - retval = MPI_ERR_OP; + return MPI_ERR_OP; } else if (request == nullptr){ - retval = MPI_ERR_ARG; + return MPI_ERR_ARG; } else { + smpi_bench_end(); char* sendtmpbuf = static_cast(sendbuf); if( sendbuf == MPI_IN_PLACE ) { sendtmpbuf = static_cast(xbt_malloc(count*datatype->get_extent())); @@ -516,12 +506,10 @@ int PMPI_Iallreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype dataty if( sendbuf == MPI_IN_PLACE ) xbt_free(sendtmpbuf); - retval = MPI_SUCCESS; TRACE_smpi_comm_out(rank); + smpi_bench_begin(); + return MPI_SUCCESS; } - - smpi_bench_begin(); - return retval; } int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) @@ -625,27 +613,32 @@ int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datat int PMPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request) { int retval = 0; - smpi_bench_end(); if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; + } else if ((sendbuf == nullptr) || (recvbuf == nullptr)) { + retval = MPI_ERR_BUFFER; } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()){ retval = MPI_ERR_TYPE; } else if (op == MPI_OP_NULL) { retval = MPI_ERR_OP; } else if (recvcounts == nullptr) { retval = MPI_ERR_ARG; - } else if (request == nullptr){ + } else if (request == nullptr){ retval = MPI_ERR_ARG; } else { + for (int i = 0; i < comm->size(); i++) { // copy data to avoid bad free + if(recvcounts[i]<0) + return MPI_ERR_COUNT; + } + smpi_bench_end(); + int rank = simgrid::s4u::this_actor::get_pid(); std::vector* trace_recvcounts = new std::vector; int dt_send_size = datatype->is_replayable() ? 
1 : datatype->size(); int totalcount = 0; for (int i = 0; i < comm->size(); i++) { // copy data to avoid bad free - if(recvcounts[i]<0) - return MPI_ERR_COUNT; trace_recvcounts->push_back(recvcounts[i] * dt_send_size); totalcount += recvcounts[i]; } @@ -670,9 +663,9 @@ int PMPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Data if (sendbuf == MPI_IN_PLACE) xbt_free(sendtmpbuf); + smpi_bench_begin(); } - smpi_bench_begin(); return retval; } @@ -747,6 +740,8 @@ int PMPI_Ialltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* re if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; + } else if (sendbuf == nullptr || recvbuf == nullptr) { + retval = MPI_ERR_BUFFER; } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) { retval = MPI_ERR_TYPE; } else if ((sendbuf != MPI_IN_PLACE && sendcount < 0) || recvcount < 0){ @@ -796,11 +791,10 @@ int PMPI_Ialltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request) { int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; + } else if (sendbuf == nullptr || recvbuf == nullptr) { + retval = MPI_ERR_BUFFER; } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) { retval = MPI_ERR_TYPE; } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr || @@ -811,6 +805,11 @@ int PMPI_Ialltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype } else { int rank = simgrid::s4u::this_actor::get_pid(); int size = comm->size(); + for (int i = 0; i < size; i++) { + if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0)) + return MPI_ERR_COUNT; + } + smpi_bench_end(); int send_size = 0; int recv_size = 0; std::vector* trace_sendcounts = new std::vector; @@ -823,8 +822,6 @@ int PMPI_Ialltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype MPI_Datatype sendtmptype = sendtype; int maxsize = 0; for (int i = 0; i < size; i++) { // copy data to avoid bad free - if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0)) - return MPI_ERR_COUNT; recv_size += recvcounts[i] * dt_size_recv; trace_recvcounts->push_back(recvcounts[i] * dt_size_recv); if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize) @@ -866,9 +863,8 @@ int PMPI_Ialltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype xbt_free(sendtmpcounts); xbt_free(sendtmpdisps); } + smpi_bench_begin(); } - - smpi_bench_begin(); return retval; } @@ -882,11 +878,10 @@ int PMPI_Ialltoallw(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype int* recvcounts, int* recvdisps, MPI_Datatype* recvtypes, MPI_Comm comm, MPI_Request *request) { int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; + } else if (sendbuf == nullptr || recvbuf == nullptr) { + retval = MPI_ERR_BUFFER; } else if ((sendbuf != MPI_IN_PLACE && sendtypes == nullptr) || recvtypes == nullptr) { retval = MPI_ERR_TYPE; } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr || @@ -895,8 +890,13 @@ int PMPI_Ialltoallw(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype } else if (request == nullptr){ retval = MPI_ERR_ARG; } else { + smpi_bench_end(); int rank = simgrid::s4u::this_actor::get_pid(); int size = comm->size(); + for (int i = 0; i < size; i++) { // copy data 
to avoid bad free + if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0)) + return MPI_ERR_COUNT; + } int send_size = 0; int recv_size = 0; std::vector* trace_sendcounts = new std::vector; @@ -954,8 +954,7 @@ int PMPI_Ialltoallw(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype xbt_free(sendtmpdisps); xbt_free(sendtmptypes); } + smpi_bench_begin(); } - - smpi_bench_begin(); return retval; } diff --git a/teshsuite/smpi/coll-allgather/coll-allgather.c b/teshsuite/smpi/coll-allgather/coll-allgather.c index 55d3c4a789..787a3bb005 100644 --- a/teshsuite/smpi/coll-allgather/coll-allgather.c +++ b/teshsuite/smpi/coll-allgather/coll-allgather.c @@ -29,6 +29,28 @@ int main(int argc, char *argv[]) for (int i = 0; i < count * size; ++i) rb[i] = 0; + status = MPI_Allgather(NULL, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allgather did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Allgather(sb, -1, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Allgather did not return MPI_ERR_COUNT for -1 sendcount\n"); + status = MPI_Allgather(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Allgather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Allgather(sb, count, MPI_INT, NULL, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allgather did not return MPI_ERR_BUFFER for empty recvbuf\n"); + status = MPI_Allgather(sb, count, MPI_INT, rb, -1, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Allgather did not return MPI_ERR_COUNT for -1 recvcount\n"); + status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_DATATYPE_NULL, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Allgather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n"); + status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Allgather did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + printf("[%d] sndbuf=[", rank); for (int i = 0; i < count; i++) printf("%d ", sb[i]); diff --git a/teshsuite/smpi/coll-allgather/coll-allgather.tesh b/teshsuite/smpi/coll-allgather/coll-allgather.tesh index 7f6d031a31..d18ded9805 100644 --- a/teshsuite/smpi/coll-allgather/coll-allgather.tesh +++ b/teshsuite/smpi/coll-allgather/coll-allgather.tesh @@ -2,7 +2,7 @@ ! 
output sort p Test allgather -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-allgatherv/coll-allgatherv.c b/teshsuite/smpi/coll-allgatherv/coll-allgatherv.c index 852dba3aef..37018a4023 100644 --- a/teshsuite/smpi/coll-allgatherv/coll-allgatherv.c +++ b/teshsuite/smpi/coll-allgatherv/coll-allgatherv.c @@ -34,6 +34,31 @@ int main(int argc, char *argv[]) int* sb = (int *) xbt_malloc(recv_counts[rank] * sizeof(int)); int* rb = (int *) xbt_malloc(recv_sb_size * sizeof(int)); + status = MPI_Allgatherv(NULL, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allgatherv did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Allgatherv(sb, -1, MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Allgatherv did not return MPI_ERR_COUNT for -1 sendcount\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_DATATYPE_NULL, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Allgatherv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, NULL, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allgatherv did not return MPI_ERR_BUFFER for empty recvbuf\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, NULL, recv_disps, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Allgatherv did not return MPI_ERR_ARG for NULL recvcounts\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, NULL, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Allgatherv did not return MPI_ERR_ARG for NULL recvdisps\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_DATATYPE_NULL, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Allgatherv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n"); + status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Allgatherv did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + printf("[%d] sndbuf=[", rank); for (i = 0; i < recv_counts[rank]; i++){ sb[i] = recv_disps[rank] + i; diff --git a/teshsuite/smpi/coll-allgatherv/coll-allgatherv.tesh b/teshsuite/smpi/coll-allgatherv/coll-allgatherv.tesh index 835f62872d..3716966b8e 100644 --- a/teshsuite/smpi/coll-allgatherv/coll-allgatherv.tesh +++ b/teshsuite/smpi/coll-allgatherv/coll-allgatherv.tesh @@ -2,7 +2,7 @@ ! 
output sort p Test allgatherv -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-allreduce/coll-allreduce-automatic.tesh b/teshsuite/smpi/coll-allreduce/coll-allreduce-automatic.tesh index 939de4860a..8471bda576 100644 --- a/teshsuite/smpi/coll-allreduce/coll-allreduce-automatic.tesh +++ b/teshsuite/smpi/coll-allreduce/coll-allreduce-automatic.tesh @@ -2,7 +2,7 @@ p Test allreduce ! output sort -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-allreduce/coll-allreduce-large.tesh b/teshsuite/smpi/coll-allreduce/coll-allreduce-large.tesh index 1aa4a721a6..a8f6ac316a 100644 --- a/teshsuite/smpi/coll-allreduce/coll-allreduce-large.tesh +++ b/teshsuite/smpi/coll-allreduce/coll-allreduce-large.tesh @@ -3,7 +3,7 @@ ! 
timeout 20 p Test allreduce -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Jupiter > [rank 2] -> Fafard diff --git a/teshsuite/smpi/coll-allreduce/coll-allreduce.c b/teshsuite/smpi/coll-allreduce/coll-allreduce.c index b5b71a8dab..6d4264890f 100644 --- a/teshsuite/smpi/coll-allreduce/coll-allreduce.c +++ b/teshsuite/smpi/coll-allreduce/coll-allreduce.c @@ -33,6 +33,25 @@ int main(int argc, char *argv[]) rb[i] = 0; } + status = MPI_Allreduce(NULL, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allreduce did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Allreduce(sb, NULL, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Allreduce did not return MPI_ERR_BUFFER for empty recvbuf\n"); + status = MPI_Allreduce(sb, rb, -1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Allreduce did not return MPI_ERR_COUNT for -1 count\n"); + status = MPI_Allreduce(sb, rb, size, MPI_DATATYPE_NULL, MPI_SUM, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Allreduce did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL type\n"); + status = MPI_Allreduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_OP_NULL, MPI_COMM_WORLD); + if(status!=MPI_ERR_OP) + printf("MPI_Allreduce did not return MPI_ERR_OP for MPI_OP_NULL op\n"); + status = MPI_Allreduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Allreduce did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + printf("[%d] sndbuf=[", rank); for (i = 0; i < size *mult; i++) printf("%d ", sb[i]); diff --git a/teshsuite/smpi/coll-allreduce/coll-allreduce.tesh b/teshsuite/smpi/coll-allreduce/coll-allreduce.tesh index f1b9a3c67c..83b9fa24f5 100644 --- a/teshsuite/smpi/coll-allreduce/coll-allreduce.tesh +++ b/teshsuite/smpi/coll-allreduce/coll-allreduce.tesh @@ -2,7 +2,7 @@ ! output sort p Test allreduce -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-alltoall/clusters.tesh b/teshsuite/smpi/coll-alltoall/clusters.tesh index 33faf0c66c..f31c1dbccf 100644 --- a/teshsuite/smpi/coll-alltoall/clusters.tesh +++ b/teshsuite/smpi/coll-alltoall/clusters.tesh @@ -2,7 +2,7 @@ !
output sort p Test classic - backbone -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir}/../hostfile_cluster -platform ${platfdir:=.}/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir}/../hostfile_cluster -platform ${platfdir:=.}/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.simgrid.org > [rank 1] -> node-1.simgrid.org > [rank 2] -> node-2.simgrid.org @@ -42,7 +42,7 @@ $ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir}/../host ! output sort p Test separate clusters -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_multi.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_multi.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.1core.org > [rank 1] -> node-1.1core.org > [rank 2] -> node-2.1core.org @@ -82,7 +82,7 @@ $ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples ! output sort p Test torus -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.simgrid.org > [rank 1] -> node-1.simgrid.org > [rank 2] -> node-2.simgrid.org @@ -122,7 +122,7 @@ $ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_clust ! output sort p Test fat tree -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.simgrid.org > [rank 1] -> node-1.simgrid.org > [rank 2] -> node-2.simgrid.org @@ -162,7 +162,7 @@ $ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_clust ! 
output sort p Test fat tree IB -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.simgrid.org > [rank 1] -> node-1.simgrid.org > [rank 2] -> node-2.simgrid.org @@ -202,7 +202,7 @@ $ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_clust ! output sort p Test Dragonfly -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> node-0.simgrid.org > [rank 1] -> node-1.simgrid.org > [rank 2] -> node-2.simgrid.org diff --git a/teshsuite/smpi/coll-alltoall/coll-alltoall.c b/teshsuite/smpi/coll-alltoall/coll-alltoall.c index 97596c2445..09e844ac24 100644 --- a/teshsuite/smpi/coll-alltoall/coll-alltoall.c +++ b/teshsuite/smpi/coll-alltoall/coll-alltoall.c @@ -33,8 +33,31 @@ int main(int argc, char *argv[]) for (i = 0; i < size; i++) printf("%d ", sb[i]); printf("]\n"); + int count=1; - status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD); + status = MPI_Alltoall(NULL, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Alltoall did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Alltoall(sb, -1, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Alltoall did not return MPI_ERR_COUNT for -1 sendcount\n"); + status = MPI_Alltoall(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Alltoall did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Alltoall(sb, count, MPI_INT, NULL, count, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Alltoall did not return MPI_ERR_BUFFER for empty recvbuf\n"); + status = MPI_Alltoall(sb, count, MPI_INT, rb, -1, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Alltoall did not return MPI_ERR_COUNT for -1 recvcount\n"); + status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_DATATYPE_NULL, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Alltoall did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n"); + status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Alltoall did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + + status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD); printf("[%d] rcvbuf=[", rank); for (i = 0; i < size; i++) diff --git a/teshsuite/smpi/coll-alltoall/coll-alltoall.tesh 
b/teshsuite/smpi/coll-alltoall/coll-alltoall.tesh index f1dc1e52cf..b1e8e8cde4 100644 --- a/teshsuite/smpi/coll-alltoall/coll-alltoall.tesh +++ b/teshsuite/smpi/coll-alltoall/coll-alltoall.tesh @@ -2,7 +2,7 @@ ! output sort p Test all to all -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir:=.}/../hostfile_coll -platform ${platfdir}/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir:=.}/../hostfile_coll -platform ${platfdir}/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-alltoallv/coll-alltoallv.c b/teshsuite/smpi/coll-alltoallv/coll-alltoallv.c index ed1c5bbc6a..aa777e0795 100644 --- a/teshsuite/smpi/coll-alltoallv/coll-alltoallv.c +++ b/teshsuite/smpi/coll-alltoallv/coll-alltoallv.c @@ -38,7 +38,7 @@ <2> sdisp: (#3): [0][1][3] <2> rdisp: (#3): [0][2][4] - after MPI_Alltoallv : + after MPI_Alltoallvv : <0> rbuf: (#9): [-1][-1][-1][-1][-1][-1][-1][-1][-1] <1> rbuf: (#9): [1][101][201][-1][-1][-1][-1][-1][-1] <2> rbuf: (#9): [3][4][103][104][203][204][-1][-1][-1] @@ -93,6 +93,35 @@ int main(int argc, char **argv) rdispls[i] = i * rank; sdispls[i] = (i * (i + 1)) / 2; } + int status; + + status = MPI_Alltoallv(NULL, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Alltoallv did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Alltoallv(sbuf, NULL, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL sendcounts\n"); + status = MPI_Alltoallv(sbuf, sendcounts, NULL, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL senddispl\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_DATATYPE_NULL, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Alltoallv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, NULL, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Alltoallv did not return MPI_ERR_BUFFER for empty recvbuf\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, NULL, rdispls, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL recvcounts\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, NULL, MPI_INT, MPI_COMM_WORLD); + if(status!=MPI_ERR_ARG) + printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL recvdispl\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_DATATYPE_NULL, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Alltoallv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n"); + status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Alltoallv did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); print_buffer_int(sbuf, size2, "sbuf:", rank); 
print_buffer_int(sendcounts, size, "scount:", rank); diff --git a/teshsuite/smpi/coll-alltoallv/coll-alltoallv.tesh b/teshsuite/smpi/coll-alltoallv/coll-alltoallv.tesh index 02acc662d7..aabb597f49 100644 --- a/teshsuite/smpi/coll-alltoallv/coll-alltoallv.tesh +++ b/teshsuite/smpi/coll-alltoallv/coll-alltoallv.tesh @@ -2,7 +2,7 @@ ! output sort p Test all to all -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-barrier/coll-barrier.c b/teshsuite/smpi/coll-barrier/coll-barrier.c index d0a6bc9b0f..3909496bff 100644 --- a/teshsuite/smpi/coll-barrier/coll-barrier.c +++ b/teshsuite/smpi/coll-barrier/coll-barrier.c @@ -16,6 +16,10 @@ int main(int argc, char **argv) MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); + int status = MPI_Barrier(MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Barrier did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + MPI_Barrier(MPI_COMM_WORLD); if (0 == rank) { diff --git a/teshsuite/smpi/coll-barrier/coll-barrier.tesh b/teshsuite/smpi/coll-barrier/coll-barrier.tesh index 6f92664c2a..b8faa4db4c 100644 --- a/teshsuite/smpi/coll-barrier/coll-barrier.tesh +++ b/teshsuite/smpi/coll-barrier/coll-barrier.tesh @@ -2,7 +2,7 @@ ! output sort p Test barrier -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > ... Barrier .... > [rank 0] -> Tremblay > [rank 1] -> Tremblay diff --git a/teshsuite/smpi/coll-bcast/coll-bcast.c b/teshsuite/smpi/coll-bcast/coll-bcast.c index 4a145d50bf..bf88289e5f 100644 --- a/teshsuite/smpi/coll-bcast/coll-bcast.c +++ b/teshsuite/smpi/coll-bcast/coll-bcast.c @@ -13,6 +13,7 @@ int main(int argc, char **argv) int size; int rank; int count = 2048; + int status; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); @@ -23,6 +24,25 @@ int main(int argc, char **argv) for (i = 0; i < count; i++) values[i] = (0 == rank) ? 
17 : 3; + status = MPI_Bcast(NULL, count, MPI_INT, 0, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Bcast did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Bcast(values, -1, MPI_INT, 0, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Bcast did not return MPI_ERR_COUNT for -1 sendcount\n"); + status = MPI_Bcast(values, count, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Bcast did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Bcast(values, count, MPI_INT, -1, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Bcast did not return MPI_ERR_ROOT for -1 root\n"); + status = MPI_Bcast(values, count, MPI_INT, size, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Bcast did not return MPI_ERR_ROOT for root > size\n"); + status = MPI_Bcast(values, count, MPI_INT, 0, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Bcast did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + MPI_Bcast(values, count, MPI_INT, 0, MPI_COMM_WORLD); int good = 0; @@ -39,7 +59,7 @@ int main(int argc, char **argv) for (i = 0; i < count; i++) values[i] = (size -1 == rank) ? 17 : 3; - int status = MPI_Bcast(values, count, MPI_INT, size-1, MPI_COMM_WORLD); + status = MPI_Bcast(values, count, MPI_INT, size-1, MPI_COMM_WORLD); good = 0; for (i = 0; i < count; i++) diff --git a/teshsuite/smpi/coll-bcast/coll-bcast.tesh b/teshsuite/smpi/coll-bcast/coll-bcast.tesh index 32d741e360..43dcf103a7 100644 --- a/teshsuite/smpi/coll-bcast/coll-bcast.tesh +++ b/teshsuite/smpi/coll-bcast/coll-bcast.tesh @@ -1,7 +1,7 @@ p Test Broadcast with more processes than hosts ! output sort -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-gather/coll-gather.c b/teshsuite/smpi/coll-gather/coll-gather.c index 0097c351d8..fab25b679a 100644 --- a/teshsuite/smpi/coll-gather/coll-gather.c +++ b/teshsuite/smpi/coll-gather/coll-gather.c @@ -36,6 +36,25 @@ int main(int argc, char *argv[]) printf("%d ", sb[i]); printf("]\n"); + status = MPI_Gather(NULL, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Gather did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Gather(sb, -1, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Gather did not return MPI_ERR_COUNT for -1 sendcount\n"); + status = MPI_Gather(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Gather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, -1, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Gather did not return MPI_ERR_ROOT for root -1\n"); + status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, size+1, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Gather did not return MPI_ERR_ROOT for root > size\n"); 
+ status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Gather did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD); if (rank == root) { @@ -45,7 +64,7 @@ int main(int argc, char *argv[]) printf("]\n"); if (status != MPI_SUCCESS) { - printf("allgather returned %d\n", status); + printf("gather returned %d\n", status); fflush(stdout); } } diff --git a/teshsuite/smpi/coll-gather/coll-gather.tesh b/teshsuite/smpi/coll-gather/coll-gather.tesh index 99efd2b02a..84fcf65209 100644 --- a/teshsuite/smpi/coll-gather/coll-gather.tesh +++ b/teshsuite/smpi/coll-gather/coll-gather.tesh @@ -3,7 +3,7 @@ ! timeout 30 p Test all to all -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [rank 0] -> Tremblay > [rank 1] -> Tremblay > [rank 2] -> Tremblay diff --git a/teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter.c b/teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter.c index a6cfcdf5d8..34d2e404f6 100644 --- a/teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter.c +++ b/teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter.c @@ -1,5 +1,5 @@ /* Copyright (c) 2013-2019. The SimGrid Team. - * All rights reserved. */ + * All rights reserved. */ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. 
*/ @@ -16,43 +16,64 @@ int main( int argc, char **argv ) { - int err = 0; - int toterr; - int size; - int rank; - int i; - MPI_Comm comm; - - MPI_Init( &argc, &argv ); - comm = MPI_COMM_WORLD; - - MPI_Comm_size( comm, &size ); - MPI_Comm_rank( comm, &rank ); - int* sendbuf = (int *) malloc( size * sizeof(int) ); - for (i=0; i No Errors > [rank 0] -> Tremblay > [rank 10] -> Fafard diff --git a/teshsuite/smpi/coll-reduce/coll-reduce.c b/teshsuite/smpi/coll-reduce/coll-reduce.c index cf69db412f..6bec6714a4 100644 --- a/teshsuite/smpi/coll-reduce/coll-reduce.c +++ b/teshsuite/smpi/coll-reduce/coll-reduce.c @@ -32,8 +32,30 @@ int main(int argc, char *argv[]) for (i = 0; i < size; i++) printf("%llu ", sb[i]); printf("]\n"); - int root=0; + + status = MPI_Reduce(NULL, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_BUFFER) + printf("MPI_Reduce did not return MPI_ERR_BUFFER for empty sendbuf\n"); + status = MPI_Reduce(sb, rb, -1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_COUNT) + printf("MPI_Reduce did not return MPI_ERR_COUNT for -1 count\n"); + status = MPI_Reduce(sb, rb, size, MPI_DATATYPE_NULL, MPI_SUM, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_TYPE) + printf("MPI_Reduce did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL type\n"); + status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_OP_NULL, root, MPI_COMM_WORLD); + if(status!=MPI_ERR_OP) + printf("MPI_Reduce did not return MPI_ERR_OP for MPI_OP_NULL op\n"); + status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, -1, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Reduce did not return MPI_ERR_ROOT for root -1\n"); + status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, size+1, MPI_COMM_WORLD); + if(status!=MPI_ERR_ROOT) + printf("MPI_Reduce did not return MPI_ERR_ROOT for root > size\n"); + status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_NULL); + if(status!=MPI_ERR_COMM) + printf("MPI_Reduce did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); diff --git a/teshsuite/smpi/coll-reduce/coll-reduce.tesh b/teshsuite/smpi/coll-reduce/coll-reduce.tesh index bbab866811..69433de174 100644 --- a/teshsuite/smpi/coll-reduce/coll-reduce.tesh +++ b/teshsuite/smpi/coll-reduce/coll-reduce.tesh @@ -2,7 +2,7 @@ !
output sort p Test allreduce -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ] > [0] second sndbuf=[0 ] > [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ] diff --git a/teshsuite/smpi/coll-scatter/coll-scatter.c b/teshsuite/smpi/coll-scatter/coll-scatter.c index 123297717d..cedd15fc01 100644 --- a/teshsuite/smpi/coll-scatter/coll-scatter.c +++ b/teshsuite/smpi/coll-scatter/coll-scatter.c @@ -9,42 +9,60 @@ int main(int argc, char **argv) { - int size; - int rank; - int success = 1; - int retval; - int sendcount = 1; // one double to each process - int recvcount = 1; - double *sndbuf = NULL; - double rcvd; - int root = 0; // arbitrary choice - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &size); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - // on root, initialize sendbuf - if (root == rank) { - sndbuf = malloc(size * sizeof(double)); - for (int i = 0; i < size; i++) { - sndbuf[i] = (double) i; - } - } - - retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD); - if (root == rank) { - free(sndbuf); - } - if (retval != MPI_SUCCESS) { - fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval); - return 0; - } - // verification - if ((double) rank != rcvd) { - fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank); - success = 0; - } + int size; + int rank; + int success = 1; + int retval; + int sendcount = 1; // one double to each process + int recvcount = 1; + double *sndbuf = NULL; + double rcvd; + int root = 0; // arbitrary choice + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + // on root, initialize sendbuf + if (root == rank) { + sndbuf = malloc(size * sizeof(double)); + for (int i = 0; i < size; i++) { + sndbuf[i] = (double) i; + } + } + + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, NULL, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD); + if(retval!=MPI_ERR_BUFFER) + printf("MPI_Scatter did not return MPI_ERR_BUFFER for empty recvbuf\n"); + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, -1, MPI_DOUBLE, root, MPI_COMM_WORLD); + if(retval!=MPI_ERR_COUNT) + printf("MPI_Scatter did not return MPI_ERR_COUNT for -1 recvcount\n"); + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DATATYPE_NULL, root, MPI_COMM_WORLD); + if(retval!=MPI_ERR_TYPE) + printf("MPI_Scatter did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n"); + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, -1, MPI_COMM_WORLD); + if(retval!=MPI_ERR_ROOT) + printf("MPI_Scatter did not return MPI_ERR_ROOT for root -1\n"); + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, size+1, MPI_COMM_WORLD); + if(retval!=MPI_ERR_ROOT) + printf("MPI_Scatter did not return MPI_ERR_ROOT for root > size\n"); + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, 
root, MPI_COMM_NULL); + if(retval!=MPI_ERR_COMM) + printf("MPI_Scatter did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n"); + + retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD); + if (root == rank) { + free(sndbuf); + } + if (retval != MPI_SUCCESS) { + fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval); + return 0; + } + // verification + if ((double) rank != rcvd) { + fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank); + success = 0; + } /* test 1 */ if (0 == rank) printf("** Small Test Result: ...\n"); diff --git a/teshsuite/smpi/coll-scatter/coll-scatter.tesh b/teshsuite/smpi/coll-scatter/coll-scatter.tesh index a7bd5a65f8..a7f0556e4f 100644 --- a/teshsuite/smpi/coll-scatter/coll-scatter.tesh +++ b/teshsuite/smpi/coll-scatter/coll-scatter.tesh @@ -2,7 +2,7 @@ ! output sort p Test scatter -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error > [0] ok. > [10] ok. > [11] ok.
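
The check-and-report pattern added to the teshsuite programs above can also be reproduced outside the test suite. The sketch below is illustrative only and is not part of this commit; the helper name check_status is invented for the example. It assumes an MPI that reports argument errors through return codes: SMPI returns them directly, as the tests above rely on, while a standard MPI aborts by default, hence the explicit MPI_ERRORS_RETURN handler.

/* Minimal standalone sketch of the error-return checks, applied to MPI_Bcast. */
#include <mpi.h>
#include <stdio.h>

/* check_status: compare a collective's return code against the expected MPI
 * error class and report a mismatch, mirroring the if(status!=...) printf(...)
 * pattern used by the teshsuite programs. */
static void check_status(const char* call, const char* reason, int status, int expected)
{
  if (status != expected)
    printf("%s did not return the expected error for %s (got %d)\n", call, reason, status);
}

int main(int argc, char* argv[])
{
  int buf[4] = {0, 0, 0, 0};

  MPI_Init(&argc, &argv);
  /* A standard MPI aborts on error by default; ask for error codes instead.
   * (SMPI already returns them without this call.) */
  MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

  check_status("MPI_Bcast", "NULL buffer",
               MPI_Bcast(NULL, 4, MPI_INT, 0, MPI_COMM_WORLD), MPI_ERR_BUFFER);
  check_status("MPI_Bcast", "negative count",
               MPI_Bcast(buf, -1, MPI_INT, 0, MPI_COMM_WORLD), MPI_ERR_COUNT);
  check_status("MPI_Bcast", "MPI_DATATYPE_NULL datatype",
               MPI_Bcast(buf, 4, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD), MPI_ERR_TYPE);
  check_status("MPI_Bcast", "out-of-range root",
               MPI_Bcast(buf, 4, MPI_INT, -1, MPI_COMM_WORLD), MPI_ERR_ROOT);
  /* No communicator-level handler applies to MPI_COMM_NULL, so some MPIs may
   * still abort on this call; SMPI returns MPI_ERR_COMM here. */
  check_status("MPI_Bcast", "MPI_COMM_NULL communicator",
               MPI_Bcast(buf, 4, MPI_INT, 0, MPI_COMM_NULL), MPI_ERR_COMM);

  MPI_Finalize();
  return 0;
}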