X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/f50c785a8f726657dc3b4e32de522a7b3baca707..3aa62b6a4755ba871847a5f86473c0e651c9d661:/src/smpi/smpi_win.cpp

diff --git a/src/smpi/smpi_win.cpp b/src/smpi/smpi_win.cpp
index 67612c3be2..f5e573dfb8 100644
--- a/src/smpi/smpi_win.cpp
+++ b/src/smpi/smpi_win.cpp
@@ -16,7 +16,7 @@ int Win::keyval_id_=0;
 Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm){
 int comm_size = comm->size();
- int rank = comm->rank();
+ rank_ = comm->rank();
 XBT_DEBUG("Creating window");
 if(info!=MPI_INFO_NULL)
 info->ref();
@@ -25,13 +25,18 @@ Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm)
 group_ = MPI_GROUP_NULL;
 requests_ = new std::vector<MPI_Request>();
 mut_=xbt_mutex_init();
+ lock_mut_=xbt_mutex_init();
 connected_wins_ = new MPI_Win[comm_size];
- connected_wins_[rank] = this;
+ connected_wins_[rank_] = this;
 count_ = 0;
- if(rank==0){
+ if(rank_==0){
 bar_ = MSG_barrier_init(comm_size);
 }
- Colls::allgather(&(connected_wins_[rank]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
+ mode_=0;
+
+ comm->add_rma_win(this);
+
+ Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
 MPI_BYTE, comm);
 Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
@@ -42,9 +47,11 @@ Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm)
 Win::~Win(){
 //As per the standard, perform a barrier to ensure every async comm is finished
 MSG_barrier_wait(bar_);
- xbt_mutex_acquire(mut_);
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
+
 delete requests_;
- xbt_mutex_release(mut_);
 delete[] connected_wins_;
 if (name_ != nullptr){
 xbt_free(name_);
@@ -53,11 +60,14 @@ Win::~Win(){
 MPI_Info_free(&info_);
 }
+ comm_->remove_rma_win(this);
+
 Colls::barrier(comm_);
 int rank=comm_->rank();
 if(rank == 0)
 MSG_barrier_destroy(bar_);
 xbt_mutex_destroy(mut_);
+ xbt_mutex_destroy(lock_mut_);
 cleanup_attr();
 }
@@ -80,6 +90,10 @@ void Win::get_group(MPI_Group* group){
 }
 }

+int Win::rank(){
+ return rank_;
+}
+
 MPI_Aint Win::size(){
 return size_;
 }
@@ -137,11 +151,19 @@ int Win::fence(int assert)
 int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
 {
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
 //get receiver pointer
 MPI_Win recv_win = connected_wins_[target_rank];

+ if(opened_==0){//check that post/start has been done
+ // no fence or start .. lock ok ?
+ int locked=0;
+ for(auto it : recv_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
 if(target_count*target_datatype->get_extent()>recv_win->size_)
 return MPI_ERR_ARG;
@@ -150,11 +172,11 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
 if(target_rank != comm_->rank()){
 //prepare send_request
- MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process_index(),
+ MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process()->index(),
 comm_->group()->index(target_rank), SMPI_RMA_TAG+1, comm_, MPI_OP_NULL);
 //prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process_index(),
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
 comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
 //push request to receiver's win
@@ -178,11 +200,19 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
 int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
 {
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
 //get sender pointer
 MPI_Win send_win = connected_wins_[target_rank];

+ if(opened_==0){//check that post/start has been done
+ // no fence or start .. lock ok ?
+ int locked=0;
+ for(auto it : send_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
 if(target_count*target_datatype->get_extent()>send_win->size_)
 return MPI_ERR_ARG;
@@ -192,12 +222,12 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
 if(target_rank != comm_->rank()){
 //prepare send_request
 MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- comm_->group()->index(target_rank), smpi_process_index(), SMPI_RMA_TAG+2, send_win->comm_,
+ comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, send_win->comm_,
 MPI_OP_NULL);
 //prepare receiver request
 MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- comm_->group()->index(target_rank), smpi_process_index(), SMPI_RMA_TAG+2, comm_,
+ comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, comm_,
 MPI_OP_NULL);
 //start the send, with another process than us as sender.
@@ -224,25 +254,35 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
 int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op)
 {
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
- //FIXME: local version
+
 //get receiver pointer
 MPI_Win recv_win = connected_wins_[target_rank];

+ if(opened_==0){//check that post/start has been done
+ // no fence or start .. lock ok ?
+ int locked=0;
+ for(auto it : recv_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+ //FIXME: local version
+
 if(target_count*target_datatype->get_extent()>recv_win->size_)
 return MPI_ERR_ARG;

 void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
 XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
- //As the tag will be used for ordering of the operations, add count to it
+ //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
 //prepare send_request
+
 MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype,
- smpi_process_index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, comm_, op);
 //prepare receiver request
 MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype,
- smpi_process_index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, recv_win->comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
 count_++;
 //push request to receiver's win
@@ -281,7 +321,7 @@ int Win::start(MPI_Group group, int assert){
 while (j != size) {
 int src = group->index(j);
- if (src != smpi_process_index() && src != MPI_UNDEFINED) {
+ if (src != smpi_process()->index() && src != MPI_UNDEFINED) {
 reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD);
 i++;
 }
@@ -309,7 +349,7 @@ int Win::post(MPI_Group group, int assert){
 while(j!=size){
 int dst=group->index(j);
- if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){
+ if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
 reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD);
 i++;
 }
@@ -341,7 +381,7 @@ int Win::complete(){
 while(j!=size){
 int dst=group_->index(j);
- if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){
+ if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
 reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD);
 i++;
 }
@@ -357,24 +397,8 @@ int Win::complete(){
 }
 xbt_free(reqs);
- //now we can finish RMA calls
- xbt_mutex_acquire(mut_);
- std::vector<MPI_Request> *reqqs = requests_;
- size = static_cast<int>(reqqs->size());
-
- XBT_DEBUG("Win_complete - Finishing %d RMA calls", size);
- if (size > 0) {
- // start all requests that have been prepared by another process
- for (const auto& req : *reqqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
- MPI_Request* treqs = &(*reqqs)[0];
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- reqqs->clear();
- }
- xbt_mutex_release(mut_);
+ int finished = finish_comms();
+ XBT_DEBUG("Win_complete - Finished %d RMA calls", finished);
 Group::unref(group_);
 opened_--; //we're closed for business !
@@ -390,7 +414,7 @@ int Win::wait(){
 while(j!=size){
 int src=group_->index(j);
- if(src!=smpi_process_index() && src!=MPI_UNDEFINED){
+ if(src!=smpi_process()->index() && src!=MPI_UNDEFINED){
 reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD);
 i++;
 }
@@ -404,11 +428,65 @@ int Win::wait(){
 Request::unref(&reqs[i]);
 }
 xbt_free(reqs);
+ int finished = finish_comms();
+ XBT_DEBUG("Win_wait - Finished %d RMA calls", finished);
+
+ Group::unref(group_);
+ opened_--; //we're opened for business !
+ return MPI_SUCCESS;
+}
+
+int Win::lock(int lock_type, int rank, int assert){
+ if(opened_!=0)
+ return MPI_ERR_WIN;
+
+ MPI_Win target_win = connected_wins_[rank];
+
+ if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
+ xbt_mutex_acquire(target_win->lock_mut_);
+ target_win->mode_+= lock_type;//add the lock_type to differentiate case when we are switching from EXCLUSIVE to SHARED (no release needed in the unlock)
+ if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
+ xbt_mutex_release(target_win->lock_mut_);
+ }
+ } else if(!(target_win->mode_==MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
+ target_win->mode_+= lock_type; // don't set to exclusive if it's already shared
+
+ target_win->lockers_.push_back(comm_->rank());
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win_lock %d - Finished %d RMA calls", rank, finished);
+
+ return MPI_SUCCESS;
+}
+
+int Win::unlock(int rank){
+ if(opened_!=0)
+ return MPI_ERR_WIN;
+
+ MPI_Win target_win = connected_wins_[rank];
+ int target_mode = target_win->mode_;
+ target_win->mode_= 0;
+ target_win->lockers_.remove(comm_->rank());
+ if (target_mode==MPI_LOCK_EXCLUSIVE){
+ xbt_mutex_release(target_win->lock_mut_);
+ }
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win_unlock %d - Finished %d RMA calls", rank, finished);
+
+ return MPI_SUCCESS;
+}
+
+Win* Win::f2c(int id){
+ return static_cast<Win*>(F2C::f2c(id));
+}
+
+
+int Win::finish_comms(){
 xbt_mutex_acquire(mut_);
+ //Finish own requests
 std::vector<MPI_Request> *reqqs = requests_;
- size = static_cast<int>(reqqs->size());
-
- XBT_DEBUG("Win_wait - Finishing %d RMA calls", size);
+ int size = static_cast<int>(reqqs->size());
 if (size > 0) {
 // start all requests that have been prepared by another process
 for (const auto& req : *reqqs) {
 if (req && (req->flags() & PREPARED))
 req->start();
 }
@@ -421,15 +499,9 @@ int Win::wait(){
 MPI_Request* treqs = &(*reqqs)[0];
 Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
 reqqs->clear();
 }
 xbt_mutex_release(mut_);
-
- Group::unref(group_);
- opened_--; //we're opened for business !
- return MPI_SUCCESS;
+ return size;
 }

-Win* Win::f2c(int id){
- return static_cast<Win*>(F2C::f2c(id));
-}
 }
 }
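Context for the diff above (editorial sketch, not part of the commit): the new lock_mut_, lockers_, mode_ and finish_comms() machinery implements MPI passive-target synchronization, i.e. one-sided accesses framed by MPI_Win_lock/MPI_Win_unlock instead of a fence or a post/start/complete/wait epoch. A minimal MPI program exercising that code path under SMPI could look as follows; the buffer name, the transferred value and the choice of ranks are illustrative assumptions, not taken from the diff.

/* passive_target_put.c -- illustrative only; assumes at least 2 MPI processes. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  int rank;
  int size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  /* Every process exposes one int through the window (handled by Win::Win above). */
  int buf = -1;
  MPI_Win win;
  MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  if (rank == 0 && size > 1) {
    int value = 42;
    /* Win::lock() records rank 0 in the target window's lockers_, so the
       opened_==0 branch in Win::put() accepts the access. */
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win);
    MPI_Put(&value, 1, MPI_INT, 1 /* target rank */, 0 /* target disp */, 1, MPI_INT, win);
    /* Win::unlock() drains the pending RMA requests via finish_comms(). */
    MPI_Win_unlock(1, win);
  }

  MPI_Barrier(MPI_COMM_WORLD);
  printf("[%d] buf=%d\n", rank, buf);

  MPI_Win_free(&win);
  MPI_Finalize();
  return 0;
}

In this scheme the exclusive case maps onto xbt_mutex_acquire(target_win->lock_mut_) in Win::lock(), so a second MPI_LOCK_EXCLUSIVE on the same window blocks until Win::unlock() releases the mutex, while shared locks only adjust mode_ and register the caller in lockers_.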