X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/4d648ebbbe5705878080b9cbf1ca61497323c592..ea02d47c2ee462e84798356e4dda87b1467e22b8:/src/smpi/mpi/smpi_win.cpp

diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index 84d748ac51..297dd072c3 100644
--- a/src/smpi/mpi/smpi_win.cpp
+++ b/src/smpi/mpi/smpi_win.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -14,6 +14,8 @@
 #include "smpi_request.hpp"
 #include "src/smpi/include/smpi_actor.hpp"
 
+#include <algorithm>
+
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
@@ -22,36 +24,35 @@ namespace smpi{
 std::unordered_map<int, smpi_key_elem> Win::keyvals_;
 int Win::keyval_id_=0;
 
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
-  int comm_size = comm->size();
-  rank_ = comm->rank();
+Win::Win(void* base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic)
+    : base_(base)
+    , size_(size)
+    , disp_unit_(disp_unit)
+    , info_(info)
+    , comm_(comm)
+    , connected_wins_(comm->size())
+    , rank_(comm->rank())
+    , allocated_(allocated)
+    , dynamic_(dynamic)
+{
   XBT_DEBUG("Creating window");
   if(info!=MPI_INFO_NULL)
     info->ref();
-  name_ = nullptr;
-  opened_ = 0;
-  group_ = MPI_GROUP_NULL;
-  requests_ = new std::vector<MPI_Request>();
-  mut_ = s4u::Mutex::create();
-  lock_mut_ = s4u::Mutex::create();
-  atomic_mut_ = s4u::Mutex::create();
-  connected_wins_ = new MPI_Win[comm_size];
   connected_wins_[rank_] = this;
-  count_ = 0;
   if(rank_==0){
-    bar_ = new s4u::Barrier(comm_size);
+    bar_ = new s4u::Barrier(comm->size());
   }
-  mode_=0;
-
+  errhandler_->ref();
   comm->add_rma_win(this);
   comm->ref();
 
-  Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
-                   MPI_BYTE, comm);
+  colls::allgather(&connected_wins_[rank_], sizeof(MPI_Win), MPI_BYTE, connected_wins_.data(), sizeof(MPI_Win),
+                   MPI_BYTE, comm);
 
-  Colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
+  colls::bcast(&bar_, sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
 
-  Colls::barrier(comm);
+  colls::barrier(comm);
+  this->add_f();
 }
 
 Win::~Win(){
@@ -61,18 +62,14 @@ Win::~Win(){
   int finished = finish_comms();
   XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
 
-  delete requests_;
-  delete[] connected_wins_;
-  if (name_ != nullptr){
-    xbt_free(name_);
-  }
-  if(info_!=MPI_INFO_NULL){
-    MPI_Info_free(&info_);
-  }
+  if (info_ != MPI_INFO_NULL)
+    simgrid::smpi::Info::unref(info_);
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    simgrid::smpi::Errhandler::unref(errhandler_);
 
   comm_->remove_rma_win(this);
 
-  Colls::barrier(comm_);
+  colls::barrier(comm_);
   Comm::unref(comm_);
 
   if (rank_ == 0)
@@ -81,31 +78,33 @@
   if(allocated_ !=0)
     xbt_free(base_);
 
+  F2C::free_f(this->f2c_id());
   cleanup_attr();
 }
 
-int Win::attach (void *base, MPI_Aint size){
-  if (not(base_ == MPI_BOTTOM || base_ == 0))
+int Win::attach(void* /*base*/, MPI_Aint size)
+{
+  if (not(base_ == MPI_BOTTOM || base_ == nullptr))
    return MPI_ERR_ARG;
-  base_=0;//actually the address will be given in the RMA calls, as being the disp.
+  base_ = nullptr; // actually the address will be given in the RMA calls, as being the disp.
   size_+=size;
   return MPI_SUCCESS;
 }
 
-int Win::detach (void *base){
+int Win::detach(const void* /*base*/)
+{
   base_=MPI_BOTTOM;
   size_=-1;
   return MPI_SUCCESS;
 }
 
-void Win::get_name(char* name, int* length){
-  if(name_==nullptr){
-    *length=0;
-    name=nullptr;
-    return;
+void Win::get_name(char* name, int* length) const
+{
+  *length = static_cast<int>(name_.length());
+  if (not name_.empty()) {
+    name_.copy(name, *length);
+    name[*length] = '\0';
   }
-  *length = strlen(name_);
-  strncpy(name, name_, *length+1);
 }
 
 void Win::get_group(MPI_Group* group){
@@ -116,41 +115,55 @@ void Win::get_group(MPI_Group* group){
   }
 }
 
-MPI_Info Win::info(){
-  if(info_== MPI_INFO_NULL)
+MPI_Info Win::info()
+{
+  if (info_ == MPI_INFO_NULL)
     info_ = new Info();
   info_->ref();
   return info_;
 }
 
-int Win::rank(){
+int Win::rank() const
+{
   return rank_;
 }
 
-MPI_Aint Win::size(){
+MPI_Comm Win::comm() const
+{
+  return comm_;
+}
+
+MPI_Aint Win::size() const
+{
   return size_;
 }
 
-void* Win::base(){
+void* Win::base() const
+{
   return base_;
 }
 
-int Win::disp_unit(){
+int Win::disp_unit() const
+{
   return disp_unit_;
 }
 
-int Win::dynamic(){
+int Win::dynamic() const
+{
   return dynamic_;
 }
 
-void Win::set_info(MPI_Info info){
-  if(info_!= MPI_INFO_NULL)
-    info->ref();
-  info_=info;
+void Win::set_info(MPI_Info info)
+{
+  if (info_ != MPI_INFO_NULL)
+    simgrid::smpi::Info::unref(info_);
+  info_ = info;
+  if (info_ != MPI_INFO_NULL)
+    info_->ref();
 }
 
-void Win::set_name(char* name){
-  name_ = xbt_strdup(name);
+void Win::set_name(const char* name){
+  name_ = name;
 }
 
 int Win::fence(int assert)
@@ -163,14 +176,14 @@ int Win::fence(int assert)
     bar_->wait();
     mut_->lock();
     // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
-    // Without this, the vector could get redimensionned when another process pushes.
+    // Without this, the vector could get redimensioned when another process pushes.
    // This would result in the array used by Request::waitall() to be invalidated.
    // Another solution would be to copy the data and cleanup the vector *before* Request::waitall
-    std::vector<MPI_Request> *reqs = requests_;
-    int size = static_cast<int>(reqs->size());
+
     // start all requests that have been prepared by another process
-    if (size > 0) {
-      MPI_Request* treqs = &(*reqs)[0];
+    if (not requests_.empty()) {
+      int size = static_cast<int>(requests_.size());
+      MPI_Request* treqs = requests_.data();
       Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
     }
     count_=0;
@@ -187,11 +200,11 @@ int Win::fence(int assert)
   return MPI_SUCCESS;
 }
 
-int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
 {
   //get receiver pointer
-  MPI_Win recv_win = connected_wins_[target_rank];
+  Win* recv_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -203,10 +216,12 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
       return MPI_ERR_WIN;
   }
 
-  if(target_count*target_datatype->get_extent()>recv_win->size_)
-    return MPI_ERR_ARG;
+  if(target_count*target_datatype->get_extent()>recv_win->size_){
+    XBT_WARN("Trying to put more than the window size - Bailing out.");
+    return MPI_ERR_RMA_RANGE;
+  }
 
-  void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+  void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
 
   if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
     XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
@@ -228,17 +243,16 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
       *request=sreq;
     }else{
       mut_->lock();
-      requests_->push_back(sreq);
+      requests_.push_back(sreq);
       mut_->unlock();
     }
 
     //push request to receiver's win
     recv_win->mut_->lock();
-    recv_win->requests_->push_back(rreq);
+    recv_win->requests_.push_back(rreq);
     rreq->start();
     recv_win->mut_->unlock();
-
-  }else{
+  } else {
     XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
     Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
     if(request!=nullptr)
@@ -252,7 +266,7 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
 {
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -264,10 +278,12 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
       return MPI_ERR_WIN;
   }
 
-  if(target_count*target_datatype->get_extent()>send_win->size_)
-    return MPI_ERR_ARG;
+  if(target_count*target_datatype->get_extent()>send_win->size_){
+    XBT_WARN("Trying to get more than the window size - Bailing out.");
+    return MPI_ERR_RMA_RANGE;
+  }
 
-  void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
+  const void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
 
   XBT_DEBUG("Entering MPI_Get from %d", target_rank);
   if(target_rank != comm_->rank()){
@@ -285,7 +301,7 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     sreq->start();
     //push request to receiver's win
     send_win->mut_->lock();
-    send_win->requests_->push_back(sreq);
+    send_win->requests_.push_back(sreq);
     send_win->mut_->unlock();
 
     //start recv
@@ -295,26 +311,23 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
       *request=rreq;
     }else{
       mut_->lock();
-      requests_->push_back(rreq);
+      requests_.push_back(rreq);
       mut_->unlock();
     }
-
-  }else{
+  } else {
     Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
     if(request!=nullptr)
       *request=MPI_REQUEST_NULL;
   }
-
   return MPI_SUCCESS;
 }
 
-
-int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
 {
   XBT_DEBUG("Entering MPI_Win_Accumulate");
   //get receiver pointer
-  MPI_Win recv_win = connected_wins_[target_rank];
+  Win* recv_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -327,13 +340,16 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
   }
 
   //FIXME: local version
-  if(target_count*target_datatype->get_extent()>recv_win->size_)
-    return MPI_ERR_ARG;
+  if(target_count*target_datatype->get_extent()>recv_win->size_){
+    XBT_WARN("Trying to accumulate more than the window size - Bailing out.");
+    return MPI_ERR_RMA_RANGE;
+  }
 
-  void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+  void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
 
   XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
-  //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
-  //prepare send_request
+  // As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
+  // SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
+  // prepare send_request
   MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), target_rank,
                                             SMPI_RMA_TAG - 3 - count_, comm_, op);
@@ -348,7 +364,7 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
   sreq->start();
   // push request to receiver's win
   recv_win->mut_->lock();
-  recv_win->requests_->push_back(rreq);
+  recv_win->requests_.push_back(rreq);
   rreq->start();
   recv_win->mut_->unlock();
 
@@ -356,7 +372,7 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
     *request = sreq;
   } else {
     mut_->lock();
-    requests_->push_back(sreq);
+    requests_.push_back(sreq);
     mut_->unlock();
   }
 
@@ -364,12 +380,12 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
   return MPI_SUCCESS;
 }
 
-int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-                        int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-                        MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
-
+int Win::get_accumulate(const void* origin_addr, int origin_count, MPI_Datatype origin_datatype, void* result_addr,
+                        int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+                        int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
+{
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  const Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -381,8 +397,11 @@ int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origi
       return MPI_ERR_WIN;
   }
 
-  if(target_count*target_datatype->get_extent()>send_win->size_)
-    return MPI_ERR_ARG;
+  if(target_count*target_datatype->get_extent()>send_win->size_){
+    XBT_WARN("Trying to get_accumulate more than the window size - Bailing out.");
+    return MPI_ERR_RMA_RANGE;
+  }
+
   XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
 
   //need to be sure ops are correctly ordered, so finish request here ? slow.
@@ -399,14 +418,13 @@ int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origi
   Request::wait(&req, MPI_STATUS_IGNORE);
   send_win->atomic_mut_->unlock();
   return MPI_SUCCESS;
-
 }
 
-int Win::compare_and_swap(void *origin_addr, void *compare_addr,
-        void *result_addr, MPI_Datatype datatype, int target_rank,
-        MPI_Aint target_disp){
+int Win::compare_and_swap(const void* origin_addr, const void* compare_addr, void* result_addr, MPI_Datatype datatype,
+                          int target_rank, MPI_Aint target_disp)
+{
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  const Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -433,7 +451,8 @@ int Win::compare_and_swap(void *origin_addr, void *compare_addr,
   return MPI_SUCCESS;
 }
 
-int Win::start(MPI_Group group, int assert){
+int Win::start(MPI_Group group, int /*assert*/)
+{
   /* From MPI forum advices
   The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
   will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
@@ -450,7 +469,7 @@ int Win::start(MPI_Group group, int assert){
   int i = 0;
   int j = 0;
   int size = group->size();
-  MPI_Request* reqs = xbt_new0(MPI_Request, size);
+  std::vector<MPI_Request> reqs(size);
 
   XBT_DEBUG("Entering MPI_Win_Start");
   while (j != size) {
@@ -462,12 +481,11 @@ int Win::start(MPI_Group group, int assert){
     j++;
   }
   size = i;
-  Request::startall(size, reqs);
-  Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+  Request::startall(size, reqs.data());
+  Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
   for (i = 0; i < size; i++) {
     Request::unref(&reqs[i]);
   }
-  xbt_free(reqs);
   opened_++; //we're open for business !
   group_=group;
   group->ref();
@@ -475,12 +493,13 @@ int Win::start(MPI_Group group, int assert){
   return MPI_SUCCESS;
 }
 
-int Win::post(MPI_Group group, int assert){
+int Win::post(MPI_Group group, int /*assert*/)
+{
   //let's make a synchronous send here
   int i = 0;
   int j = 0;
   int size = group->size();
-  MPI_Request* reqs = xbt_new0(MPI_Request, size);
+  std::vector<MPI_Request> reqs(size);
 
   XBT_DEBUG("Entering MPI_Win_Post");
   while(j!=size){
@@ -493,12 +512,11 @@ int Win::post(MPI_Group group, int assert){
   }
   size=i;
-  Request::startall(size, reqs);
-  Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+  Request::startall(size, reqs.data());
+  Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
   for(i=0;i<size;i++){
     Request::unref(&reqs[i]);
   }
-  xbt_free(reqs);
   group_=group;
   group->ref();
@@ -507,14 +525,13 @@ int Win::post(MPI_Group group, int assert){
 }
 
 int Win::complete(){
-  if(opened_==0)
-    xbt_die("Complete called on already opened MPI_Win");
+  xbt_assert(opened_ != 0, "Complete called on already opened MPI_Win");
 
   XBT_DEBUG("Entering MPI_Win_Complete");
   int i = 0;
   int j = 0;
   int size = group_->size();
-  MPI_Request* reqs = xbt_new0(MPI_Request, size);
+  std::vector<MPI_Request> reqs(size);
 
   while(j!=size){
     int dst = comm_->group()->rank(group_->actor(j));
@@ -526,13 +543,12 @@ int Win::complete(){
   }
   size=i;
   XBT_DEBUG("Win_complete - Sending sync messages to %d processes", size);
-  Request::startall(size, reqs);
-  Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+  Request::startall(size, reqs.data());
+  Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
   for(i=0;i<size;i++){
     Request::unref(&reqs[i]);
   }
-  xbt_free(reqs);
@@ ... @@ int Win::wait(){
   int i = 0;
   int j = 0;
   int size = group_->size();
-  MPI_Request* reqs = xbt_new0(MPI_Request, size);
+  std::vector<MPI_Request> reqs(size);
 
   while(j!=size){
     int src = comm_->group()->rank(group_->actor(j));
@@ -560,12 +576,11 @@ int Win::wait(){
   }
   size=i;
   XBT_DEBUG("Win_wait - Receiving sync messages from %d processes", size);
-  Request::startall(size, reqs);
-  Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+  Request::startall(size, reqs.data());
+  Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
   for(i=0;i<size;i++){
     Request::unref(&reqs[i]);
   }
-  xbt_free(reqs);
@@ ... @@
   if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
@@ -596,12 +612,11 @@ int Win::lock(int lock_type, int rank, int assert){
 }
 
 int Win::lock_all(int assert){
-  int i=0;
   int retval = MPI_SUCCESS;
-  for (i=0; i<comm_->size();i++){
-    int ret = this->lock(MPI_LOCK_SHARED, i, assert);
-    if(ret != MPI_SUCCESS)
-      retval = ret;
+  for (int i = 0; i < comm_->size(); i++) {
+    int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+    if (ret != MPI_SUCCESS)
+      retval = ret;
   }
   return retval;
 }
@@ -623,9 +638,8 @@ int Win::unlock(int rank){
 }
 
 int Win::unlock_all(){
-  int i=0;
   int retval = MPI_SUCCESS;
-  for (i=0; i<comm_->size();i++){
+  for (int i = 0; i < comm_->size(); i++) {
     int ret = this->unlock(i);
     if (ret != MPI_SUCCESS)
       retval = ret;
@@ -671,12 +685,11 @@ Win* Win::f2c(int id){
 int Win::finish_comms(){
   mut_->lock();
   //Finish own requests
-  std::vector<MPI_Request> *reqqs = requests_;
-  int size = static_cast<int>(reqqs->size());
+  int size = static_cast<int>(requests_.size());
   if (size > 0) {
-    MPI_Request* treqs = &(*reqqs)[0];
+    MPI_Request* treqs = requests_.data();
     Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
-    reqqs->clear();
+    requests_.clear();
   }
   mut_->unlock();
   return size;
@@ -684,40 +697,30 @@ int Win::finish_comms(){
 
 int Win::finish_comms(int rank){
   mut_->lock();
-  //Finish own requests
-  std::vector<MPI_Request> *reqqs = requests_;
-  int size = static_cast<int>(reqqs->size());
+  // Finish own requests
+  // Let's see if we're either the destination or the sender of this request
+  // because we only wait for requests that we are responsible for.
+  // Also use the process id here since the request itself returns from src()
+  // and dst() the process id, NOT the rank (which only exists in the context of a communicator).
+  int proc_id = comm_->group()->actor(rank)->get_pid();
+  auto it = std::stable_partition(begin(requests_), end(requests_), [proc_id](const MPI_Request& req) {
+    return (req == MPI_REQUEST_NULL || (req->src() != proc_id && req->dst() != proc_id));
+  });
+  std::vector<MPI_Request> myreqqs(it, end(requests_));
+  requests_.erase(it, end(requests_));
+  int size = static_cast<int>(myreqqs.size());
   if (size > 0) {
-    size = 0;
-    std::vector<MPI_Request> myreqqs;
-    std::vector<MPI_Request>::iterator iter = reqqs->begin();
-    int proc_id = comm_->group()->actor(rank)->get_pid();
-    while (iter != reqqs->end()){
-      // Let's see if we're either the destination or the sender of this request
-      // because we only wait for requests that we are responsible for.
-      // Also use the process id here since the request itself returns from src()
-      // and dst() the process id, NOT the rank (which only exists in the context of a communicator).
-      if (((*iter) != MPI_REQUEST_NULL) && (((*iter)->src() == proc_id) || ((*iter)->dst() == proc_id))) {
-        myreqqs.push_back(*iter);
-        iter = reqqs->erase(iter);
-        size++;
-      } else {
-        ++iter;
-      }
-    }
-    if(size >0){
-      MPI_Request* treqs = &myreqqs[0];
-      Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
-      myreqqs.clear();
-    }
+    MPI_Request* treqs = myreqqs.data();
+    Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+    myreqqs.clear();
   }
   mut_->unlock();
   return size;
 }
 
-int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const
 {
-  MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+  const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
   for (int i = 0; not target_win && i < comm_->size(); i++) {
     if (connected_wins_[i]->size_ > 0)
       target_win = connected_wins_[i];
@@ -728,9 +731,25 @@ int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
     *static_cast<void**>(baseptr) = target_win->base_;
   } else {
     *size = 0;
-    *static_cast<void**>(baseptr) = xbt_malloc(0);
+    *static_cast<void**>(baseptr) = nullptr;
   }
   return MPI_SUCCESS;
 }
+
+MPI_Errhandler Win::errhandler()
+{
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    errhandler_->ref();
+  return errhandler_;
 }
-}
+
+void Win::set_errhandler(MPI_Errhandler errhandler)
+{
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    simgrid::smpi::Errhandler::unref(errhandler_);
+  errhandler_ = errhandler;
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    errhandler_->ref();
+}
+} // namespace smpi
+} // namespace simgrid
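
/*
 * Illustrative usage sketch -- NOT part of the patch above. It shows the
 * smallest active-target RMA sequence exercising the code paths this diff
 * touches on the SMPI side (window creation, Win::fence, Win::put). Only
 * standard MPI calls are used; assuming a SimGrid install, it would be built
 * with the smpicxx wrapper and launched through smpirun, but any MPI
 * implementation accepts the same code.
 */
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  int size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<int> buf(size, 0); // window memory: one int slot per peer
  MPI_Win win;
  MPI_Win_create(buf.data(), size * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  MPI_Win_fence(0, win); // open the access epoch (implemented by Win::fence)
  int value  = rank;
  int target = (rank + 1) % size;
  // write our rank into slot 'rank' of the right-hand neighbour's window (Win::put)
  MPI_Put(&value, 1, MPI_INT, target, rank, 1, MPI_INT, win);
  MPI_Win_fence(0, win); // close the epoch: all puts are now visible in buf

  MPI_Win_free(&win);
  MPI_Finalize();
  return 0;
}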