X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/20180cf998fe77ea59e5dbffd7ba0515124c775d..2d37e348a09783cda723c7019640ee69de168324:/src/smpi/mpi/smpi_win.cpp

diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index 5a2544d586..cefc4c567a 100644
--- a/src/smpi/mpi/smpi_win.cpp
+++ b/src/smpi/mpi/smpi_win.cpp
@@ -1,17 +1,18 @@
-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include "smpi_win.hpp"
+
 #include "private.hpp"
 #include "smpi_coll.hpp"
 #include "smpi_comm.hpp"
 #include "smpi_datatype.hpp"
 #include "smpi_info.hpp"
 #include "smpi_keyvals.hpp"
-#include "smpi_process.hpp"
 #include "smpi_request.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
 
@@ -39,23 +40,24 @@ Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
   connected_wins_[rank_] = this;
   count_ = 0;
   if(rank_==0){
-    bar_ = MSG_barrier_init(comm_size);
+    bar_ = new simgrid::s4u::Barrier(comm_size);
   }
   mode_=0;
 
   comm->add_rma_win(this);
+  comm->ref();
 
   Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
                    MPI_BYTE, comm);
 
-  Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
+  Colls::bcast(&(bar_), sizeof(simgrid::s4u::Barrier*), MPI_BYTE, 0, comm);
 
   Colls::barrier(comm);
 }
 
 Win::~Win(){
   //As per the standard, perform a barrier to ensure every async comm is finished
-  MSG_barrier_wait(bar_);
+  bar_->wait();
 
   int finished = finish_comms();
   XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
@@ -72,8 +74,10 @@ Win::~Win(){
   comm_->remove_rma_win(this);
 
   Colls::barrier(comm_);
+  Comm::unref(comm_);
+
   if (rank_ == 0)
-    MSG_barrier_destroy(bar_);
+    delete bar_;
   xbt_mutex_destroy(mut_);
   xbt_mutex_destroy(lock_mut_);
   xbt_mutex_destroy(atomic_mut_);
@@ -160,7 +164,7 @@ int Win::fence(int assert)
 
   opened_=1;
   if (assert != MPI_MODE_NOPRECEDE) { // This is not the first fence => finalize what came before
-    MSG_barrier_wait(bar_);
+    bar_->wait();
     xbt_mutex_acquire(mut_);
     // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
     // Without this, the vector could get redimensionned when another process pushes.
@@ -181,7 +185,7 @@ int Win::fence(int assert)
   opened_=0;
   assert_ = assert;
 
-  MSG_barrier_wait(bar_);
+  bar_->wait();
   XBT_DEBUG("Leaving fence");
 
   return MPI_SUCCESS;
@@ -207,9 +211,9 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     return MPI_ERR_ARG;
 
   void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
-  XBT_DEBUG("Entering MPI_Put to %d", target_rank);
 
   if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
+    XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
     // prepare send_request
     MPI_Request sreq =
         // TODO cheinrich Check for rank / pid conversion
@@ -239,6 +243,7 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
     xbt_mutex_release(recv_win->mut_);
 
   }else{
+    XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
     Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
     if(request!=nullptr)
       *request = MPI_REQUEST_NULL;
@@ -625,18 +630,18 @@ int Win::unlock_all(){
   int i=0;
   int retval = MPI_SUCCESS;
   for (i=0; i<comm_->size();i++){
-      int ret = this->unlock(i);
-      if(ret != MPI_SUCCESS)
-          retval = ret;
+    int ret = this->unlock(i);
+    if (ret != MPI_SUCCESS)
+      retval = ret;
   }
   return retval;
 }
 
 int Win::flush(int rank){
   MPI_Win target_win = connected_wins_[rank];
-  int finished = finish_comms(rank);
+  int finished = finish_comms(rank_);
   XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
-  finished = target_win->finish_comms(rank_);
+  finished = target_win->finish_comms(rank);
   XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
   return MPI_SUCCESS;
 }
@@ -648,11 +653,9 @@ int Win::flush_local(int rank){
 }
 
 int Win::flush_all(){
-  int i=0;
-  int finished = 0;
-  finished = finish_comms();
+  int finished = finish_comms();
   XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
-  for (i=0; i<comm_->size();i++){
+  for (int i = 0; i < comm_->size(); i++) {
     finished = connected_wins_[i]->finish_comms(rank_);
     XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
   }
@@ -669,7 +672,6 @@ Win* Win::f2c(int id){
   return static_cast<Win*>(F2C::f2c(id));
 }
 
-
 int Win::finish_comms(){
   xbt_mutex_acquire(mut_);
   //Finish own requests
@@ -693,8 +695,13 @@ int Win::finish_comms(int rank){
     size = 0;
     std::vector<MPI_Request> myreqqs;
     std::vector<MPI_Request>::iterator iter = reqqs->begin();
+    int proc_id = comm_->group()->actor(rank)->get_pid();
     while (iter != reqqs->end()){
-      if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
+      // Only wait for requests that we are responsible for, i.e. those where
+      // we are either the sender or the destination. Compare against the
+      // process id, since src() and dst() on a request return the process id,
+      // NOT the rank (which only exists in the context of a communicator).
+      if (((*iter) != MPI_REQUEST_NULL) && (((*iter)->src() == proc_id) || ((*iter)->dst() == proc_id))) {
         myreqqs.push_back(*iter);
         iter = reqqs->erase(iter);
         size++;
@@ -712,6 +719,22 @@ int Win::finish_comms(int rank){
   return size;
 }
 
-
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+{
+  MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+  for (int i = 0; not target_win && i < comm_->size(); i++) {
+    if (connected_wins_[i]->size_ > 0)
+      target_win = connected_wins_[i];
+  }
+  if (target_win) {
+    *size      = target_win->size_;
+    *disp_unit = target_win->disp_unit_;
+    *static_cast<void**>(baseptr) = target_win->base_;
+  } else {
+    *size = 0;
+    *static_cast<void**>(baseptr) = xbt_malloc(0);
+  }
+  return MPI_SUCCESS;
+}
 }
 }
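
Note on the barrier migration in the hunks above: MSG_barrier_init/MSG_barrier_wait/MSG_barrier_destroy are replaced by simgrid::s4u::Barrier, with rank 0 allocating the barrier, Colls::bcast() distributing the pointer (all SMPI ranks live in one address space), every rank calling wait(), and rank 0 alone deleting it. The stand-alone sketch below shows the same S4U API outside SMPI. It assumes a SimGrid release of this era, where "new simgrid::s4u::Barrier(n)" is the public constructor, and uses the host "Tremblay" from the stock example platform; the file and actor names are illustrative only, not part of this patch.

    // barrier_sketch.cpp - illustrative only, not part of this patch.
    #include <simgrid/s4u.hpp>

    XBT_LOG_NEW_DEFAULT_CATEGORY(barrier_sketch, "Messages of the barrier sketch");

    static void worker(simgrid::s4u::Barrier* bar)
    {
      XBT_INFO("before the barrier");
      bar->wait();                                            // replaces MSG_barrier_wait(bar_)
      XBT_INFO("after the barrier");
    }

    int main(int argc, char* argv[])
    {
      simgrid::s4u::Engine e(&argc, argv);
      e.load_platform(argv[1]);                               // e.g. small_platform.xml
      auto* host = simgrid::s4u::Host::by_name("Tremblay");
      auto* bar  = new simgrid::s4u::Barrier(2);              // replaces MSG_barrier_init(2)
      simgrid::s4u::Actor::create("w1", host, worker, bar);
      simgrid::s4u::Actor::create("w2", host, worker, bar);
      e.run();
      delete bar;                                             // replaces MSG_barrier_destroy(bar_)
      return 0;
    }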
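
Note on the flush()/finish_comms(int) hunks: the old flush() passed the target rank to the local finish_comms() and the local rank to the remote window, which contradicted its own debug messages; the arguments are now swapped so the counts match "Win_flush on local %d" and "Win_flush on remote %d". Relatedly, finish_comms(int) now converts the rank to a process id before matching, because Request::src() and Request::dst() return process ids, which are global, while a rank only has meaning inside one communicator. The helper below merely names that conversion; pid_of_rank is hypothetical and not in the patch, but the call chain is the one the patch uses.

    // Illustrative only: the rank-to-pid conversion done inline in finish_comms(int).
    static int pid_of_rank(simgrid::smpi::Comm* comm, int rank)
    {
      // Group::actor() maps a communicator-local rank to the global actor,
      // whose pid is what Request::src()/dst() store.
      return comm->group()->actor(rank)->get_pid();
    }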
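
Note on the new Win::shared_query(): it backs MPI_Win_shared_query, returning the queried rank's window size, displacement unit and base address, and falling back, for MPI_PROC_NULL, to the first connected window with a non-zero size. A caller-side sketch follows, assuming this SMPI version also provides MPI_Win_allocate_shared; it is illustrative only (build with smpicxx, run with smpirun).

    // shared_query_demo.cpp - illustrative caller of MPI_Win_shared_query.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);
      MPI_Win win;
      int* mine;
      // Every rank contributes one int to a shared window.
      MPI_Win_allocate_shared(sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &mine, &win);

      MPI_Aint size;
      int disp_unit;
      int* base;
      // Query rank 0's segment; passing MPI_PROC_NULL instead of 0 would make
      // the implementation above pick the first window whose size_ is > 0.
      MPI_Win_shared_query(win, 0, &size, &disp_unit, &base);
      std::printf("size=%ld disp_unit=%d base=%p\n", (long)size, disp_unit, (void*)base);

      MPI_Win_free(&win);
      MPI_Finalize();
      return 0;
    }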