X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/766da6343971d93f860c3b336ab3813f2e0ba404..9c300852ad4204e21d42402f7d1bbb266cf0b27f:/src/smpi/smpi_base.cpp

diff --git a/src/smpi/smpi_base.cpp b/src/smpi/smpi_base.cpp
index 9b09b82a21..bd20dacbc0 100644
--- a/src/smpi/smpi_base.cpp
+++ b/src/smpi/smpi_base.cpp
@@ -19,11 +19,11 @@
 #include "simgrid/sg_config.h"
 #include "colls/colls.h"
 
-#include "src/simix/SynchroComm.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
 
-static int match_recv(void* a, void* b, smx_synchro_t ignored) {
+static int match_recv(void* a, void* b, smx_activity_t ignored) {
   MPI_Request ref = static_cast<MPI_Request>(a);
   MPI_Request req = static_cast<MPI_Request>(b);
   XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
@@ -46,7 +46,7 @@ static int match_recv(void* a, void* b, smx_synchro_t ignored) {
   }else return 0;
 }
 
-static int match_send(void* a, void* b,smx_synchro_t ignored) {
+static int match_send(void* a, void* b,smx_activity_t ignored) {
   MPI_Request ref = static_cast<MPI_Request>(a);
   MPI_Request req = static_cast<MPI_Request>(b);
   XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
@@ -90,16 +90,9 @@ static simgrid::config::Flag<double> smpi_iprobe_sleep(
 static simgrid::config::Flag<double> smpi_test_sleep(
   "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
 
-static int factor_cmp(const s_smpi_factor_multival_t& pa, const s_smpi_factor_multival_t& pb)
-{
-  return (pa.factor > pb.factor) ? 1 :
-         (pa.factor < pb.factor) ? -1 : 0;
-}
-
 static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
 {
   std::vector<s_smpi_factor_multival_t> smpi_factor;
-  s_smpi_factor_multival_t fact;
 
   /** Setup the tokenizer that parses the string **/
   typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
@@ -119,7 +112,7 @@ static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_
        token_iter != tokens.end(); token_iter++) {
 XBT_DEBUG("token : %s", token_iter->c_str());
     Tokenizer factor_values(*token_iter, factor_separator);
-
+    s_smpi_factor_multival_t fact;
     if (factor_values.begin() == factor_values.end()) {
       xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
     }
@@ -134,17 +127,21 @@ XBT_DEBUG("token : %s", token_iter->c_str());
       } else {
         errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
-        fact.values.push_back(xbt_str_parse_double((*factor_iter).c_str(), errmsg));
+        fact.values.push_back(xbt_str_parse_double(factor_iter->c_str(), errmsg));
       }
       xbt_free(errmsg);
     }
     smpi_factor.push_back(fact);
-    XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
+    XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
   }
 
-  std::sort(smpi_factor.begin(), smpi_factor.end(), &factor_cmp);
+  std::sort(smpi_factor.begin(), smpi_factor.end(),
+            [](const s_smpi_factor_multival_t &pa,
+               const s_smpi_factor_multival_t &pb) {
+              return (pa.factor < pb.factor);
+            });
   for (auto& fact : smpi_factor) {
-    XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
+    XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
   }
 
   return smpi_factor;
@@ -159,11 +156,11 @@ static double smpi_os(size_t size)
   // Iterate over all the sections that were specified and find the right
   // value. (fact.factor represents the interval sizes; we want to find the
   // section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
   for (auto& fact : smpi_os_values) {
     if (size <= fact.factor) { // Values already too large, use the previously
                                // computed value of current!
-      XBT_DEBUG("os : %zu <= %ld return %.10f", size, fact.factor, current);
+      XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
       return current;
     }else{
       // If the next section is too large, the current section must be used.
@@ -184,10 +181,10 @@ static double smpi_ois(size_t size)
   double current=smpi_ois_values.empty()?0.0:smpi_ois_values[0].values[0]+smpi_ois_values[0].values[1]*size;
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
   for (auto& fact : smpi_ois_values) {
     if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
-      XBT_DEBUG("ois : %zu <= %ld return %.10f", size, fact.factor, current);
+      XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
       return current;
     }else{
       // If the next section is too large, the current section must be used.
@@ -206,14 +203,14 @@ static double smpi_or(size_t size)
     smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
   }
 
-  double current=smpi_or_values.empty()?0.0:smpi_or_values[0].values[0]+smpi_or_values[0].values[1]*size;
+  double current=smpi_or_values.empty()?0.0:smpi_or_values.front().values[0]+smpi_or_values.front().values[1]*size;
+
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
   for (auto fact : smpi_or_values) {
-    if (size <= fact.factor) { // Values already too large, use the previously
-                               // computed value of current!
-      XBT_DEBUG("or : %zu <= %ld return %.10f", size, fact.factor, current);
+    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+      XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
       return current;
     } else {
       // If the next section is too large, the current section must be used.
@@ -233,9 +230,7 @@ void smpi_mpi_init() {
 
 double smpi_mpi_wtime(){
   double time;
-  if (smpi_process_initialized() != 0 &&
-      smpi_process_finalized() == 0 &&
-      smpi_process_get_sampling() == 0) {
+  if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) {
     smpi_bench_end();
     time = SIMIX_get_clock();
     // to avoid deadlocks if used as a break condition, such as
@@ -375,7 +370,7 @@ void smpi_mpi_start(MPI_Request request)
     //begin with the more appropriate one : the small one.
     mailbox = smpi_process_mailbox_small();
     XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
-    smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+    smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
 
     if (action == nullptr) {
       mailbox = smpi_process_mailbox();
@@ -393,7 +388,7 @@ void smpi_mpi_start(MPI_Request request)
     else {
       mailbox = smpi_process_mailbox_small();
       XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
-      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
+      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
 
       if (action == nullptr) {
         XBT_DEBUG("No, nothing in the permanent receive mailbox");
@@ -471,7 +466,7 @@ void smpi_mpi_start(MPI_Request request)
     else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
       mailbox = smpi_process_remote_mailbox(receiver);
       XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
-      smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
+      smx_activity_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
       if (action == nullptr) {
         if ((request->flags & SSEND) == 0){
           mailbox = smpi_process_remote_mailbox_small(receiver);
@@ -775,16 +770,18 @@ int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
 int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
 {
-  xbt_dynar_t comms;
+  std::vector<smx_activity_t> comms;
+  comms.reserve(count);
+
   int i;
   int flag = 0;
 
   *index = MPI_UNDEFINED;
-  comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
+
+  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
   for(i = 0; i < count; i++) {
     if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
-      xbt_dynar_push(comms, &requests[i]->action);
+      comms.push_back(requests[i]->action);
+      map.push_back(i);
     }
   }
@@ -794,7 +791,7 @@ int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status *
     if(smpi_test_sleep > 0)
       simcall_process_sleep(nsleeps*smpi_test_sleep);
 
-    i = simcall_comm_testany(comms); // The i-th element in comms matches!
+    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
     if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
       *index = map[i];
       finish_wait(&requests[*index], status);
@@ -811,7 +808,6 @@ int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status *
     flag = 1;
     smpi_empty_status(status);
   }
-  xbt_dynar_free(&comms);
 
   return flag;
 }
@@ -821,8 +817,7 @@ int smpi_mpi_testall(int count, MPI_Request requests[], MPI_Status status[])
   MPI_Status stat;
   MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
   int flag=1;
-  int i;
-  for(i=0; i<count; i++) {
+  for(int i=0; i<count; i++) {
     if ((requests[i] != MPI_REQUEST_NULL) && !(requests[i]->flags & PREPARED)) {
       if (smpi_mpi_test(&requests[i], pstat)!=1){
         flag=0;
@@ -878,8 +873,8 @@ void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status*
   }
 
   if (request->action != nullptr){
-    simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
-    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+    simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
+    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
     *flag = 1;
     if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
       status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
@@ -925,7 +920,7 @@ int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
 
   if(count > 0) {
     // Wait for a request to complete
-    comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
+    comms = xbt_dynar_new(sizeof(smx_activity_t), nullptr);
     map = xbt_new(int, count);
     XBT_DEBUG("Wait for one of %d", count);
     for(i = 0; i < count; i++) {
@@ -947,7 +942,7 @@ int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
       }
     }
     if(size > 0) {
-      i = simcall_comm_waitany(comms);
+      i = simcall_comm_waitany(comms, -1);
 
       // not MPI_UNDEFINED, as this is a simix return code
       if (i != -1) {
@@ -1243,12 +1238,13 @@ void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   int system_tag = COLL_TAG_SCATTER;
-  int rank, size, dst, index;
-  MPI_Aint lb = 0, sendext = 0;
+  int dst;
+  MPI_Aint lb = 0;
+  MPI_Aint sendext = 0;
   MPI_Request *requests;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   if(rank != root) {
     // Recv buffer from root
     smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
@@ -1261,7 +1257,7 @@ void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
   }
   // Send buffers to receivers
   requests = xbt_new(MPI_Request, size - 1);
-  index = 0;
+  int index = 0;
   for(dst = 0; dst < size; dst++) {
     if(dst != root) {
       requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
@@ -1283,12 +1279,13 @@ void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype
                        MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   int system_tag = COLL_TAG_SCATTERV;
-  int rank, size, dst, index;
-  MPI_Aint lb = 0, sendext = 0;
+  int dst;
+  MPI_Aint lb = 0;
+  MPI_Aint sendext = 0;
   MPI_Request *requests;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   if(rank != root) {
     // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
@@ -1301,7 +1298,7 @@ void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype
   }
   // Send buffers to receivers
   requests = xbt_new(MPI_Request, size - 1);
-  index = 0;
+  int index = 0;
   for(dst = 0; dst < size; dst++) {
     if(dst != root) {
       requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
@@ -1323,16 +1320,16 @@ void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
                      MPI_Comm comm)
 {
   int system_tag = COLL_TAG_REDUCE;
-  int rank, size, src, index;
-  MPI_Aint lb = 0, dataext = 0;
+  int src, index;
+  MPI_Aint lb = 0;
+  MPI_Aint dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
   char* sendtmpbuf = static_cast<char *>(sendbuf);
-
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   //non commutative case, use a working algo from openmpi
   if(!smpi_op_is_commute(op)){
     smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
@@ -1401,13 +1398,13 @@ void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype da
 void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
-  int rank, size, other, index;
+  int other, index;
   MPI_Aint lb = 0, dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
 
   smpi_datatype_extent(datatype, &lb, &dataext);
@@ -1463,13 +1460,13 @@ void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp
 void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
-  int rank, size, other, index;
+  int other, index;
   MPI_Aint lb = 0, dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
   int recvbuf_is_empty=1;
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
 
   smpi_datatype_extent(datatype, &lb, &dataext);
@@ -1479,13 +1476,11 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
   index = 0;
   for(other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
-    requests[index] =
-      smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
+    requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
     index++;
   }
   for(other = rank + 1; other < size; other++) {
-    requests[index] =
-      smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+    requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
     index++;
   }
   // Wait for completion of all comms.
@@ -1500,9 +1495,9 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
         if(recvbuf_is_empty){
           smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
           recvbuf_is_empty=0;
-        }else
-          // #Request is below rank: it's a irecv
-          smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+        } else
+          // #Request is below rank: it's a irecv
+          smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
       }
     }
   }else{
@@ -1513,7 +1508,8 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
       if(recvbuf_is_empty){
        smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
        recvbuf_is_empty=0;
-      }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+      } else
+        smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
     }
   }
 }