X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/2d2995483b57463581ffdc4365fe1999ddc306c2..efa1f86e31adca3c57e9d0135b2f6ebdbabb0878:/src/smpi/colls/smpi_coll.cpp?ds=sidebyside

diff --git a/src/smpi/colls/smpi_coll.cpp b/src/smpi/colls/smpi_coll.cpp
index 114af77f56..2dce7e7838 100644
--- a/src/smpi/colls/smpi_coll.cpp
+++ b/src/smpi/colls/smpi_coll.cpp
@@ -1,6 +1,6 @@
 /* smpi_coll.c -- various optimized routing for collectives */
 
-/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -13,56 +13,81 @@
 #include "smpi_request.hpp"
 #include "xbt/config.hpp"
 
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)");
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");
 
 #define COLL_SETTER(cat, ret, args, args2) \
-  int(*Colls::cat) args; \
-  void Colls::set_##cat(std::string name) \
+  void colls::_XBT_CONCAT(set_, cat)(const std::string& name) \
   { \
-    int id = find_coll_description(mpi_coll_##cat##_description, name, #cat); \
-    cat = reinterpret_cast<ret(*)args2>(mpi_coll_##cat##_description[id].coll); \
+    int id = find_coll_description(_XBT_CONCAT3(mpi_coll_, cat, _description), name, _XBT_STRINGIFY(cat)); \
+    cat = reinterpret_cast<ret(*)args2>(_XBT_CONCAT3(mpi_coll_, cat, _description)[id].coll); \
     if (cat == nullptr) \
-      xbt_die("Collective " #cat " set to nullptr!"); \
+      xbt_die("Collective " _XBT_STRINGIFY(cat) " set to nullptr!"); \
   }
 
 namespace simgrid{
 namespace smpi{
 
-void (*Colls::smpi_coll_cleanup_callback)();
-
 /* these arrays must be nullptr terminated */
-s_mpi_coll_description_t Colls::mpi_coll_gather_description[] = {
-    COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_allgather_description[] = {
-    COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_allgatherv_description[] = {
-    COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_allreduce_description[] ={
-    COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_reduce_scatter_description[] = {
-    COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_scatter_description[] ={
-    COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_barrier_description[] ={
-    COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_alltoall_description[] = {
-    COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_alltoallv_description[] = {
-    COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_bcast_description[] = {
-    COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
-s_mpi_coll_description_t Colls::mpi_coll_reduce_description[] = {
-    COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
+s_mpi_coll_description_t mpi_coll_gather_description[] = {COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA),
+                                                          {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_allgather_description[] = {COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
+                                                             {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
+                                                              {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_allreduce_description[] = {COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
+                                                             {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
+                                                                  {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_scatter_description[] = {COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
+                                                           {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_barrier_description[] = {COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
+                                                           {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_alltoall_description[] = {COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
+                                                            {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
+                                                             {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_bcast_description[] = {COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr}};
+s_mpi_coll_description_t mpi_coll_reduce_description[] = {COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
+                                                          {"", "", nullptr}};
+
+// Needed by the automatic selector weird implementation
+s_mpi_coll_description_t* colls::get_smpi_coll_description(const char* name, int rank)
+{
+  if (strcmp(name, "gather") == 0)
+    return &mpi_coll_gather_description[rank];
+  if (strcmp(name, "allgather") == 0)
+    return &mpi_coll_allgather_description[rank];
+  if (strcmp(name, "allgatherv") == 0)
+    return &mpi_coll_allgatherv_description[rank];
+  if (strcmp(name, "allreduce") == 0)
+    return &mpi_coll_allreduce_description[rank];
+  if (strcmp(name, "reduce_scatter") == 0)
+    return &mpi_coll_reduce_scatter_description[rank];
+  if (strcmp(name, "scatter") == 0)
+    return &mpi_coll_scatter_description[rank];
+  if (strcmp(name, "barrier") == 0)
+    return &mpi_coll_barrier_description[rank];
+  if (strcmp(name, "alltoall") == 0)
+    return &mpi_coll_alltoall_description[rank];
+  if (strcmp(name, "alltoallv") == 0)
+    return &mpi_coll_alltoallv_description[rank];
+  if (strcmp(name, "bcast") == 0)
+    return &mpi_coll_bcast_description[rank];
+  if (strcmp(name, "reduce") == 0)
+    return &mpi_coll_reduce_description[rank];
+  XBT_INFO("You requested an unknown collective: %s", name);
+  return nullptr;
+}
 
 /** Displays the long description of all registered models, and quit */
-void Colls::coll_help(const char *category, s_mpi_coll_description_t * table)
+void colls::coll_help(const char* category, s_mpi_coll_description_t* table)
 {
   XBT_WARN("Long description of the %s models accepted by this simulator:\n", category);
   for (int i = 0; not table[i].name.empty(); i++)
     XBT_WARN("  %s: %s\n", table[i].name.c_str(), table[i].description.c_str());
 }
 
-int Colls::find_coll_description(s_mpi_coll_description_t* table, std::string name, const char* desc)
+int colls::find_coll_description(s_mpi_coll_description_t* table, const std::string& name, const char* desc)
 {
   for (int i = 0; not table[i].name.empty(); i++)
     if (name == table[i].name) {
@@ -81,7 +106,39 @@ int Colls::find_coll_description(s_mpi_coll_description_t* table, std::string na
   return -1;
 }
 
-COLL_APPLY(COLL_SETTER,COLL_GATHER_SIG,"");
+int (*colls::gather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
+                     MPI_Datatype recv_type, int root, MPI_Comm comm);
+int (*colls::allgather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
+                        MPI_Datatype recv_type, MPI_Comm comm);
+int (*colls::allgatherv)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
+                         const int* recv_count, const int* recv_disps, MPI_Datatype recv_type, MPI_Comm comm);
+int (*colls::alltoall)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
+                       MPI_Datatype recv_type, MPI_Comm comm);
+int (*colls::alltoallv)(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
+                        void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
+                        MPI_Comm comm);
+int (*colls::bcast)(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
+int (*colls::reduce)(const void* buf, void* rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+int (*colls::allreduce)(const void* sbuf, void* rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
+int (*colls::reduce_scatter)(const void* sbuf, void* rbuf, const int* rcounts, MPI_Datatype dtype, MPI_Op op,
+                             MPI_Comm comm);
+int (*colls::scatter)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
+                      MPI_Datatype recvtype, int root, MPI_Comm comm);
+int (*colls::barrier)(MPI_Comm comm);
+
+void (*colls::smpi_coll_cleanup_callback)();
+
+void colls::set_gather(const std::string& name)
+{
+  int id = find_coll_description(mpi_coll_gather_description, name, "gather");
+  gather = reinterpret_cast<int (*)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
+                                    int recv_count, MPI_Datatype recv_type, int root, MPI_Comm comm)>(
+      mpi_coll_gather_description[id].coll);
+  if (gather == nullptr)
+    xbt_die("Collective gather set to nullptr!");
+}
+
+//COLL_APPLY(COLL_SETTER,COLL_GATHER_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLGATHER_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLGATHERV_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_REDUCE_SIG,"");
@@ -93,18 +150,19 @@ COLL_APPLY(COLL_SETTER,COLL_BCAST_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLTOALL_SIG,"");
 COLL_APPLY(COLL_SETTER,COLL_ALLTOALLV_SIG,"");
 
-void Colls::set_collectives(){
+void colls::set_collectives()
+{
   std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
   if (selector_name.empty())
     selector_name = "default";
 
   std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
-      {"gather", &Colls::set_gather}, {"allgather", &Colls::set_allgather},
-      {"allgatherv", &Colls::set_allgatherv}, {"allreduce", &Colls::set_allreduce},
-      {"alltoall", &Colls::set_alltoall}, {"alltoallv", &Colls::set_alltoallv},
-      {"reduce", &Colls::set_reduce}, {"reduce_scatter", &Colls::set_reduce_scatter},
-      {"scatter", &Colls::set_scatter}, {"bcast", &Colls::set_bcast},
-      {"barrier", &Colls::set_barrier}};
+      {"gather", &colls::set_gather}, {"allgather", &colls::set_allgather},
+      {"allgatherv", &colls::set_allgatherv}, {"allreduce", &colls::set_allreduce},
+      {"alltoall", &colls::set_alltoall}, {"alltoallv", &colls::set_alltoallv},
+      {"reduce", &colls::set_reduce}, {"reduce_scatter", &colls::set_reduce_scatter},
+      {"scatter", &colls::set_scatter}, {"bcast", &colls::set_bcast},
+      {"barrier", &colls::set_barrier}};
 
   for (auto& elem : setter_callbacks) {
     std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
@@ -115,90 +173,25 @@ void Colls::set_collectives(){
   }
 }
 
+//Implementations of the single algorithm collectives
 
-//Implementations of the single algorith collectives
-
-int Colls::gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
-                   MPI_Datatype recvtype, int root, MPI_Comm comm)
+int colls::gatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int* recvcounts,
+                   const int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
-  int system_tag = COLL_TAG_GATHERV;
-  MPI_Aint lb = 0;
-  MPI_Aint recvext = 0;
-
-  int rank = comm->rank();
-  int size = comm->size();
-  if (rank != root) {
-    // Send buffer to root
-    Request::send(sendbuf, sendcount, sendtype, root, system_tag, comm);
-  } else {
-    recvtype->extent(&lb, &recvext);
-    // Local copy from root
-    Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
-                   recvcounts[root], recvtype);
-    // Receive buffers from senders
-    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
-    int index = 0;
-    for (int src = 0; src < size; src++) {
-      if(src != root) {
-        requests[index] = Request::irecv_init(static_cast<char*>(recvbuf) + displs[src] * recvext,
-                                              recvcounts[src], recvtype, src, system_tag, comm);
-        index++;
-      }
-    }
-    // Wait for completion of irecv's.
-    Request::startall(size - 1, requests);
-    Request::waitall(size - 1, requests, MPI_STATUS_IGNORE);
-    for (int src = 0; src < size-1; src++) {
-      Request::unref(&requests[src]);
-    }
-    xbt_free(requests);
-  }
-  return MPI_SUCCESS;
+  MPI_Request request;
+  colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request, 0);
+  return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-
-int Colls::scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,
-                    MPI_Datatype recvtype, int root, MPI_Comm comm)
+int colls::scatterv(const void* sendbuf, const int* sendcounts, const int* displs, MPI_Datatype sendtype, void* recvbuf,
+                    int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
-  int system_tag = COLL_TAG_SCATTERV;
-  MPI_Aint lb = 0;
-  MPI_Aint sendext = 0;
-
-  int rank = comm->rank();
-  int size = comm->size();
-  if(rank != root) {
-    // Recv buffer from root
-    Request::recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
-  } else {
-    sendtype->extent(&lb, &sendext);
-    // Local copy from root
-    if(recvbuf!=MPI_IN_PLACE){
-      Datatype::copy(static_cast<char *>(sendbuf) + displs[root] * sendext, sendcounts[root],
-                     sendtype, recvbuf, recvcount, recvtype);
-    }
-    // Send buffers to receivers
-    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
-    int index = 0;
-    for (int dst = 0; dst < size; dst++) {
-      if (dst != root) {
-        requests[index] = Request::isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
-                                              sendtype, dst, system_tag, comm);
-        index++;
-      }
-    }
-    // Wait for completion of isend's.
-    Request::startall(size - 1, requests);
-    Request::waitall(size - 1, requests, MPI_STATUS_IGNORE);
-    for (int dst = 0; dst < size-1; dst++) {
-      Request::unref(&requests[dst]);
-    }
-    xbt_free(requests);
-  }
-  return MPI_SUCCESS;
+  MPI_Request request;
+  colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
 }
 
-
-int Colls::scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
   MPI_Aint lb = 0;
@@ -213,8 +206,8 @@ int Colls::scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
     Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
 
   // Send/Recv buffers to/from others
-  MPI_Request *requests = xbt_new(MPI_Request, size - 1);
-  void **tmpbufs = xbt_new(void *, rank);
+  MPI_Request* requests = new MPI_Request[size - 1];
+  unsigned char** tmpbufs = new unsigned char*[rank];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
@@ -254,12 +247,12 @@ int Colls::scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
   for(index = 0; index < size-1; index++) {
     Request::unref(&requests[index]);
   }
-  xbt_free(tmpbufs);
-  xbt_free(requests);
+  delete[] tmpbufs;
+  delete[] requests;
   return MPI_SUCCESS;
 }
 
-int Colls::exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
   MPI_Aint lb = 0;
@@ -271,8 +264,8 @@ int Colls::exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype
   datatype->extent(&lb, &dataext);
 
   // Send/Recv buffers to/from others
-  MPI_Request *requests = xbt_new(MPI_Request, size - 1);
-  void **tmpbufs = xbt_new(void *, rank);
+  MPI_Request* requests = new MPI_Request[size - 1];
+  unsigned char** tmpbufs = new unsigned char*[rank];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
@@ -321,10 +314,20 @@ int Colls::exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype
   for(index = 0; index < size-1; index++) {
     Request::unref(&requests[index]);
   }
-  xbt_free(tmpbufs);
-  xbt_free(requests);
+  delete[] tmpbufs;
+  delete[] requests;
   return MPI_SUCCESS;
 }
 
+int colls::alltoallw(const void* sendbuf, const int* sendcounts, const int* senddisps, const MPI_Datatype* sendtypes,
+                     void* recvbuf, const int* recvcounts, const int* recvdisps, const MPI_Datatype* recvtypes,
+                     MPI_Comm comm)
+{
+  MPI_Request request;
+  colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
+                    &request, 0);
+  return Request::wait(&request, MPI_STATUS_IGNORE);
+}
+
 }
 }
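A minimal usage sketch of the refactored simgrid::smpi::colls interface introduced by this patch (not part of the diff): algorithms are selected once through colls::set_collectives() or an individual setter such as colls::set_gather(), after which the chosen implementation is reachable through a plain function pointer. The header name "smpi_coll.hpp", the helper function name, and the "default" algorithm name are assumptions made for illustration.

#include "smpi_coll.hpp" // assumed header declaring simgrid::smpi::colls

static int gather_with_selected_algo(const void* sbuf, void* rbuf, int count, MPI_Datatype type, MPI_Comm comm)
{
  namespace colls = simgrid::smpi::colls;

  // Resolve every collective from the smpi/coll-selector and smpi/<coll> configuration
  // values; an unknown algorithm name makes the corresponding setter call xbt_die().
  colls::set_collectives();

  // Alternatively, force one algorithm by its name in mpi_coll_gather_description[]
  // ("default" is assumed to be a registered name here).
  colls::set_gather("default");

  // The selected implementation is now reachable through the plain function pointer.
  return colls::gather(sbuf, count, type, rbuf, count, type, 0 /* root */, comm);
}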