X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/67fe7b9d6c00b390a8598bc1e72d42d8343cb218..1bf13b42c09009ec33fd1928b8bffd2ded6bb931:/src/smpi/mpi/smpi_win.cpp

diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index c0abd4dde2..1dfda94394 100644
--- a/src/smpi/mpi/smpi_win.cpp
+++ b/src/smpi/mpi/smpi_win.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -16,43 +16,51 @@
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
 
-using simgrid::s4u::Actor;
-
 namespace simgrid{
 namespace smpi{
 std::unordered_map<int, smpi_key_elem> Win::keyvals_;
 int Win::keyval_id_=0;
 
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
-  int comm_size = comm->size();
-  rank_ = comm->rank();
+Win::Win(void* base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic)
+    : base_(base)
+    , size_(size)
+    , disp_unit_(disp_unit)
+    , info_(info)
+    , comm_(comm)
+    , rank_(comm->rank())
+    , allocated_(allocated)
+    , dynamic_(dynamic)
+{
   XBT_DEBUG("Creating window");
   if(info!=MPI_INFO_NULL)
     info->ref();
+  int comm_size = comm->size();
   name_ = nullptr;
   opened_ = 0;
   group_ = MPI_GROUP_NULL;
   requests_ = new std::vector<MPI_Request>();
-  mut_ = xbt_mutex_init();
-  lock_mut_ = xbt_mutex_init();
-  atomic_mut_ = xbt_mutex_init();
+  mut_ = s4u::Mutex::create();
+  lock_mut_ = s4u::Mutex::create();
+  atomic_mut_ = s4u::Mutex::create();
   connected_wins_ = new MPI_Win[comm_size];
   connected_wins_[rank_] = this;
   count_ = 0;
   if(rank_==0){
-    bar_ = new simgrid::s4u::Barrier(comm_size);
+    bar_ = new s4u::Barrier(comm_size);
   }
   mode_=0;
-
+  errhandler_=MPI_ERRORS_ARE_FATAL;
+  errhandler_->ref();
   comm->add_rma_win(this);
   comm->ref();
 
-  Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
-                   MPI_BYTE, comm);
+  colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win), MPI_BYTE,
+                   comm);
 
-  Colls::bcast(&(bar_), sizeof(simgrid::s4u::Barrier*), MPI_BYTE, 0, comm);
+  colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
 
-  Colls::barrier(comm);
+  colls::barrier(comm);
 }
 
 Win::~Win(){
@@ -67,20 +75,18 @@ Win::~Win(){
   if (name_ != nullptr){
     xbt_free(name_);
   }
-  if(info_!=MPI_INFO_NULL){
-    MPI_Info_free(&info_);
-  }
+  if (info_ != MPI_INFO_NULL)
+    simgrid::smpi::Info::unref(info_);
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    simgrid::smpi::Errhandler::unref(errhandler_);
 
   comm_->remove_rma_win(this);
 
-  Colls::barrier(comm_);
+  colls::barrier(comm_);
   Comm::unref(comm_);
 
   if (rank_ == 0)
     delete bar_;
-  xbt_mutex_destroy(mut_);
-  xbt_mutex_destroy(lock_mut_);
-  xbt_mutex_destroy(atomic_mut_);
 
   if(allocated_ !=0)
     xbt_free(base_);
@@ -88,7 +94,8 @@ Win::~Win(){
   cleanup_attr<Win>();
 }
 
-int Win::attach (void *base, MPI_Aint size){
+int Win::attach(void* /*base*/, MPI_Aint size)
+{
   if (not(base_ == MPI_BOTTOM || base_ == 0))
     return MPI_ERR_ARG;
   base_=0;//actually the address will be given in the RMA calls, as being the disp.
@@ -96,7 +103,8 @@ int Win::attach (void *base, MPI_Aint size){
   return MPI_SUCCESS;
 }
 
-int Win::detach (void *base){
+int Win::detach(const void* /*base*/)
+{
   base_=MPI_BOTTOM;
   size_=-1;
   return MPI_SUCCESS;
@@ -120,8 +128,9 @@ void Win::get_group(MPI_Group* group){
   }
 }
 
-MPI_Info Win::info(){
-  if(info_== MPI_INFO_NULL)
+MPI_Info Win::info()
+{
+  if (info_ == MPI_INFO_NULL)
     info_ = new Info();
   info_->ref();
   return info_;
@@ -147,13 +156,16 @@ int Win::dynamic(){
   return dynamic_;
 }
 
-void Win::set_info(MPI_Info info){
-  if(info_!= MPI_INFO_NULL)
-    info->ref();
-  info_=info;
+void Win::set_info(MPI_Info info)
+{
+  if (info_ != MPI_INFO_NULL)
+    simgrid::smpi::Info::unref(info_);
+  info_ = info;
+  if (info_ != MPI_INFO_NULL)
+    info_->ref();
 }
 
-void Win::set_name(char* name){
+void Win::set_name(const char* name){
   name_ = xbt_strdup(name);
 }
 
@@ -165,9 +177,9 @@ int Win::fence(int assert)
   if (assert != MPI_MODE_NOPRECEDE) {
     // This is not the first fence => finalize what came before
     bar_->wait();
-    xbt_mutex_acquire(mut_);
+    mut_->lock();
     // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
-    // Without this, the vector could get redimensionned when another process pushes.
+    // Without this, the vector could get redimensioned when another process pushes.
     // This would result in the array used by Request::waitall() to be invalidated.
     // Another solution would be to copy the data and cleanup the vector *before* Request::waitall
     std::vector<MPI_Request> *reqs = requests_;
@@ -178,7 +190,7 @@ int Win::fence(int assert)
       Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
     }
     count_=0;
-    xbt_mutex_release(mut_);
+    mut_->unlock();
   }
 
   if(assert==MPI_MODE_NOSUCCEED)//there should be no ops after this one, tell we are closed.
@@ -191,11 +203,11 @@
   return MPI_SUCCESS;
 }
 
-int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
 {
   //get receiver pointer
-  MPI_Win recv_win = connected_wins_[target_rank];
+  const Win* recv_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -231,18 +243,17 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     if(request!=nullptr){
       *request=sreq;
     }else{
-      xbt_mutex_acquire(mut_);
+      mut_->lock();
       requests_->push_back(sreq);
-      xbt_mutex_release(mut_);
+      mut_->unlock();
     }
     //push request to receiver's win
-    xbt_mutex_acquire(recv_win->mut_);
+    recv_win->mut_->lock();
     recv_win->requests_->push_back(rreq);
     rreq->start();
-    xbt_mutex_release(recv_win->mut_);
-
-  }else{
+    recv_win->mut_->unlock();
+  } else {
     XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
     Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
     if(request!=nullptr)
@@ -256,7 +267,7 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
 {
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  const Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -271,7 +282,7 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
   if(target_count*target_datatype->get_extent()>send_win->size_)
     return MPI_ERR_ARG;
 
-  void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
+  const void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
   XBT_DEBUG("Entering MPI_Get from %d", target_rank);
 
   if(target_rank != comm_->rank()){
@@ -288,9 +299,9 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     //start the send, with another process than us as sender.
     sreq->start();
     //push request to receiver's win
-    xbt_mutex_acquire(send_win->mut_);
+    send_win->mut_->lock();
     send_win->requests_->push_back(sreq);
-    xbt_mutex_release(send_win->mut_);
+    send_win->mut_->unlock();
 
     //start recv
     rreq->start();
@@ -298,27 +309,24 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     if(request!=nullptr){
       *request=rreq;
     }else{
-      xbt_mutex_acquire(mut_);
+      mut_->lock();
      requests_->push_back(rreq);
-      xbt_mutex_release(mut_);
+      mut_->unlock();
     }
-
-  }else{
+  } else {
     Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
     if(request!=nullptr)
       *request=MPI_REQUEST_NULL;
   }
-
   return MPI_SUCCESS;
 }
 
-
-int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
 {
   XBT_DEBUG("Entering MPI_Win_Accumulate");
   //get receiver pointer
-  MPI_Win recv_win = connected_wins_[target_rank];
+  const Win* recv_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -336,8 +344,9 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
   void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
   XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
 
-  //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
-  //prepare send_request
+  // As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
+  // SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
+  // prepare send_request
   MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), target_rank,
                                             SMPI_RMA_TAG - 3 - count_, comm_, op);
@@ -351,29 +360,29 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da
   // start send
   sreq->start();
   // push request to receiver's win
-  xbt_mutex_acquire(recv_win->mut_);
+  recv_win->mut_->lock();
   recv_win->requests_->push_back(rreq);
   rreq->start();
-  xbt_mutex_release(recv_win->mut_);
+  recv_win->mut_->unlock();
 
   if (request != nullptr) {
     *request = sreq;
   } else {
-    xbt_mutex_acquire(mut_);
+    mut_->lock();
     requests_->push_back(sreq);
-    xbt_mutex_release(mut_);
+    mut_->unlock();
   }
 
   XBT_DEBUG("Leaving MPI_Win_Accumulate");
   return MPI_SUCCESS;
 }
 
-int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-    int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-    MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
-
+int Win::get_accumulate(const void* origin_addr, int origin_count, MPI_Datatype origin_datatype, void* result_addr,
+                        int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+                        int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
+{
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  const Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -391,7 +400,7 @@ int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origi
   XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
   //need to be sure ops are correctly ordered, so finish request here ? slow.
   MPI_Request req;
-  xbt_mutex_acquire(send_win->atomic_mut_);
+  send_win->atomic_mut_->lock();
   get(result_addr, result_count, result_datatype, target_rank,
       target_disp, target_count, target_datatype, &req);
   if (req != MPI_REQUEST_NULL)
@@ -401,16 +410,15 @@ int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origi
       target_disp, target_count, target_datatype, op, &req);
   if (req != MPI_REQUEST_NULL)
     Request::wait(&req, MPI_STATUS_IGNORE);
-  xbt_mutex_release(send_win->atomic_mut_);
+  send_win->atomic_mut_->unlock();
   return MPI_SUCCESS;
-
 }
 
-int Win::compare_and_swap(void *origin_addr, void *compare_addr,
+int Win::compare_and_swap(const void *origin_addr, void *compare_addr,
         void *result_addr, MPI_Datatype datatype, int target_rank,
         MPI_Aint target_disp){
   //get sender pointer
-  MPI_Win send_win = connected_wins_[target_rank];
+  const Win* send_win = connected_wins_[target_rank];
 
   if(opened_==0){//check that post/start has been done
     // no fence or start .. lock ok ?
@@ -424,7 +432,7 @@ int Win::compare_and_swap(void *origin_addr, void *compare_addr,
   XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
   MPI_Request req = MPI_REQUEST_NULL;
-  xbt_mutex_acquire(send_win->atomic_mut_);
+  send_win->atomic_mut_->lock();
   get(result_addr, 1, datatype, target_rank,
       target_disp, 1, datatype, &req);
   if (req != MPI_REQUEST_NULL)
@@ -433,11 +441,12 @@ int Win::compare_and_swap(void *origin_addr, void *compare_addr,
     put(origin_addr, 1, datatype, target_rank,
       target_disp, 1, datatype);
   }
-  xbt_mutex_release(send_win->atomic_mut_);
+  send_win->atomic_mut_->unlock();
   return MPI_SUCCESS;
 }
 
-int Win::start(MPI_Group group, int assert){
+int Win::start(MPI_Group group, int /*assert*/)
+{
   /* From MPI forum advices
   The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
   will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
@@ -479,7 +488,8 @@ int Win::start(MPI_Group group, int assert){
   return MPI_SUCCESS;
 }
 
-int Win::post(MPI_Group group, int assert){
+int Win::post(MPI_Group group, int /*assert*/)
+{
   //let's make a synchronous send here
   int i = 0;
   int j = 0;
@@ -578,14 +588,15 @@ int Win::wait(){
   return MPI_SUCCESS;
 }
 
-int Win::lock(int lock_type, int rank, int assert){
+int Win::lock(int lock_type, int rank, int /*assert*/)
+{
   MPI_Win target_win = connected_wins_[rank];
 
   if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
-    xbt_mutex_acquire(target_win->lock_mut_);
+    target_win->lock_mut_->lock();
     target_win->mode_+= lock_type;//add the lock_type to differentiate case when we are switching from EXCLUSIVE to SHARED (no release needed in the unlock)
     if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
-      xbt_mutex_release(target_win->lock_mut_);
+      target_win->lock_mut_->unlock();
     }
   } else if (not(target_win->mode_ == MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
     target_win->mode_ += lock_type; // don't set to exclusive if it's already shared
@@ -616,7 +627,7 @@ int Win::unlock(int rank){
   target_win->mode_= 0;
   target_win->lockers_.remove(comm_->rank());
   if (target_mode==MPI_LOCK_EXCLUSIVE){
-    xbt_mutex_release(target_win->lock_mut_);
+    target_win->lock_mut_->unlock();
   }
 
   int finished = finish_comms(rank);
@@ -673,7 +684,7 @@ Win* Win::f2c(int id){
 }
 
 int Win::finish_comms(){
-  xbt_mutex_acquire(mut_);
+  mut_->lock();
   //Finish own requests
   std::vector<MPI_Request> *reqqs = requests_;
   int size = static_cast<int>(reqqs->size());
@@ -682,12 +693,12 @@ int Win::finish_comms(){
     Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
     reqqs->clear();
   }
-  xbt_mutex_release(mut_);
+  mut_->unlock();
   return size;
 }
 
 int Win::finish_comms(int rank){
-  xbt_mutex_acquire(mut_);
+  mut_->lock();
   //Finish own requests
   std::vector<MPI_Request> *reqqs = requests_;
   int size = static_cast<int>(reqqs->size());
@@ -715,13 +726,13 @@ int Win::finish_comms(int rank){
       myreqqs.clear();
     }
   }
-  xbt_mutex_release(mut_);
+  mut_->unlock();
   return size;
 }
 
 int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
 {
-  MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+  const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
   for (int i = 0; not target_win && i < comm_->size(); i++) {
     if (connected_wins_[i]->size_ > 0)
       target_win = connected_wins_[i];
@@ -732,9 +743,25 @@ int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
     *static_cast<void**>(baseptr) = target_win->base_;
   } else {
     *size = 0;
-    *static_cast<void**>(baseptr) = xbt_malloc(0);
+    *static_cast<void**>(baseptr) = nullptr;
   }
   return MPI_SUCCESS;
 }
+
+MPI_Errhandler Win::errhandler()
+{
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    errhandler_->ref();
+  return errhandler_;
 }
-}
+
+void Win::set_errhandler(MPI_Errhandler errhandler)
+{
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    simgrid::smpi::Errhandler::unref(errhandler_);
+  errhandler_ = errhandler;
+  if (errhandler_ != MPI_ERRHANDLER_NULL)
+    errhandler_->ref();
+}
+} // namespace smpi
+} // namespace simgrid
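
Note on the recurring change: this patch moves the SMPI RMA window from SimGrid's C-style mutex API (xbt_mutex_init / xbt_mutex_acquire / xbt_mutex_release / xbt_mutex_destroy) to the C++ s4u::Mutex one. The following is a minimal illustrative sketch only, not SimGrid code: drain_pending() and the pending vector are hypothetical stand-ins for the pattern Win::fence() uses above, draining requests_ while holding mut_. It shows the same create()/lock()/unlock() calls the patch introduces.

    #include <simgrid/s4u.hpp>
    #include <vector>

    // Must run inside a SimGrid actor, as the Win methods above do.
    static void drain_pending()
    {
      // was: xbt_mutex_t mut = xbt_mutex_init();
      simgrid::s4u::MutexPtr mut = simgrid::s4u::Mutex::create();
      std::vector<int> pending; // hypothetical stand-in for Win::requests_

      mut->lock();     // was: xbt_mutex_acquire(mut_);
      pending.clear(); // no other actor can push while the mutex is held
      mut->unlock();   // was: xbt_mutex_release(mut_);

      // No xbt_mutex_destroy() counterpart: MutexPtr is an intrusive smart
      // pointer, so the mutex is released with its last reference; that is
      // why ~Win() above simply drops the three destroy calls.
    }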