-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_win.hpp"
+
#include "private.hpp"
#include "smpi_coll.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_info.hpp"
#include "smpi_keyvals.hpp"
-#include "smpi_process.hpp"
#include "smpi_request.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
+
+#include <algorithm>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
-using simgrid::s4u::Actor;
+#define CHECK_RMA_REMOTE_WIN(fun, win)\
+ if(target_count*target_datatype->get_extent()>win->size_){\
+ XBT_WARN("%s: Trying to move %zd, which exceeds the window size on target process %d : %zd - Bailing out.",\
+ fun, target_count*target_datatype->get_extent(), target_rank, win->size_);\
+ simgrid::smpi::utils::set_current_buffer(1,"win_base",win->base_);\
+ return MPI_ERR_RMA_RANGE;\
+ }
+
+#define CHECK_WIN_LOCKED(win) \
+ if (opened_ == 0) { /*check that post/start has been done*/ \
+ bool locked = std::any_of(begin(win->lockers_), end(win->lockers_), [this](int it) { return it == this->rank_; }); \
+ if (not locked) \
+ return MPI_ERR_WIN; \
+ }
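
These two macros centralize the validity checks that the previous code duplicated in every RMA entry point. A sketch of the intended call-site pattern, mirroring how Win::put below uses them:

    Win* recv_win = connected_wins_[target_rank];
    CHECK_WIN_LOCKED(recv_win)                 // outside an epoch, the caller must hold a lock on the target
    CHECK_RMA_REMOTE_WIN("MPI_Put", recv_win)  // bail out with MPI_ERR_RMA_RANGE on out-of-bounds accesses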
namespace simgrid{
namespace smpi{
std::unordered_map<int, smpi_key_elem> Win::keyvals_;
int Win::keyval_id_=0;
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
- int comm_size = comm->size();
- rank_ = comm->rank();
+Win::Win(void* base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, bool allocated, bool dynamic)
+ : base_(base)
+ , size_(size)
+ , disp_unit_(disp_unit)
+ , info_(info)
+ , comm_(comm)
+ , connected_wins_(comm->size())
+ , rank_(comm->rank())
+ , allocated_(allocated)
+ , dynamic_(dynamic)
+{
XBT_DEBUG("Creating window");
if(info!=MPI_INFO_NULL)
info->ref();
- name_ = nullptr;
- opened_ = 0;
- group_ = MPI_GROUP_NULL;
- requests_ = new std::vector<MPI_Request>();
- mut_=xbt_mutex_init();
- lock_mut_=xbt_mutex_init();
- atomic_mut_=xbt_mutex_init();
- connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
- count_ = 0;
if(rank_==0){
- bar_ = MSG_barrier_init(comm_size);
+ bar_ = new s4u::Barrier(comm->size());
}
- mode_=0;
-
+ errhandler_->ref();
comm->add_rma_win(this);
+ comm->ref();
- Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
- MPI_BYTE, comm);
+ colls::allgather(&connected_wins_[rank_], sizeof(MPI_Win), MPI_BYTE, connected_wins_.data(), sizeof(MPI_Win),
+ MPI_BYTE, comm);
- Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
+ colls::bcast(&bar_, sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
- Colls::barrier(comm);
+ colls::barrier(comm);
+ this->add_f();
}
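
The constructor exchanges raw Win* and s4u::Barrier* pointers through allgather/bcast. This is only meaningful because SMPI runs all simulated ranks inside a single simulation process, so a pointer published by one rank can be dereferenced by every other. A condensed sketch of that exchange, using a local table for illustration:

    // same logic as connected_wins_ above; all ranks share one address space under SMPI
    std::vector<MPI_Win> table(comm->size());
    table[comm->rank()] = this;                        // publish my own Win*
    colls::allgather(&table[comm->rank()], sizeof(MPI_Win), MPI_BYTE,
                     table.data(), sizeof(MPI_Win), MPI_BYTE, comm);
    // every rank now holds direct pointers to all peers' Win objects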
Win::~Win(){
//As per the standard, perform a barrier to ensure every async comm is finished
- MSG_barrier_wait(bar_);
+ bar_->wait();
- int finished = finish_comms();
- XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
+ flush_local_all();
- delete requests_;
- delete[] connected_wins_;
- if (name_ != nullptr){
- xbt_free(name_);
- }
- if(info_!=MPI_INFO_NULL){
- MPI_Info_free(&info_);
- }
+ if (info_ != MPI_INFO_NULL)
+ simgrid::smpi::Info::unref(info_);
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
comm_->remove_rma_win(this);
- Colls::barrier(comm_);
+ colls::barrier(comm_);
+ Comm::unref(comm_);
+
if (rank_ == 0)
- MSG_barrier_destroy(bar_);
- xbt_mutex_destroy(mut_);
- xbt_mutex_destroy(lock_mut_);
- xbt_mutex_destroy(atomic_mut_);
+ delete bar_;
- if(allocated_ !=0)
+ if (allocated_)
xbt_free(base_);
+ F2C::free_f(this->f2c_id());
cleanup_attr<Win>();
}
-int Win::attach (void *base, MPI_Aint size){
- if (not(base_ == MPI_BOTTOM || base_ == 0))
+int Win::attach(void* /*base*/, MPI_Aint size)
+{
+ if (not(base_ == MPI_BOTTOM || base_ == nullptr))
return MPI_ERR_ARG;
- base_=0;//actually the address will be given in the RMA calls, as being the disp.
+ base_ = nullptr; // the address will actually be given in each RMA call, as the displacement.
size_+=size;
return MPI_SUCCESS;
}
-int Win::detach (void *base){
+int Win::detach(const void* /*base*/)
+{
base_=MPI_BOTTOM;
size_=-1;
return MPI_SUCCESS;
}
-void Win::get_name(char* name, int* length){
- if(name_==nullptr){
- *length=0;
- name=nullptr;
- return;
+void Win::get_name(char* name, int* length) const
+{
+ *length = static_cast<int>(name_.length());
+ if (not name_.empty()) {
+ name_.copy(name, *length);
+ name[*length] = '\0';
}
- *length = strlen(name_);
- strncpy(name, name_, *length+1);
}
void Win::get_group(MPI_Group* group){
}
}
-MPI_Info Win::info(){
- if(info_== MPI_INFO_NULL)
- info_ = new Info();
- info_->ref();
+MPI_Info Win::info()
+{
return info_;
}
-int Win::rank(){
+int Win::rank() const
+{
return rank_;
}
-MPI_Aint Win::size(){
+MPI_Comm Win::comm() const
+{
+ return comm_;
+}
+
+MPI_Aint Win::size() const
+{
return size_;
}
-void* Win::base(){
+void* Win::base() const
+{
return base_;
}
-int Win::disp_unit(){
+int Win::disp_unit() const
+{
return disp_unit_;
}
-int Win::dynamic(){
+bool Win::dynamic() const
+{
return dynamic_;
}
-void Win::set_info(MPI_Info info){
- if(info_!= MPI_INFO_NULL)
- info->ref();
- info_=info;
+void Win::set_info(MPI_Info info)
+{
+ if (info_ != MPI_INFO_NULL)
+ simgrid::smpi::Info::unref(info_);
+ info_ = info;
+ if (info_ != MPI_INFO_NULL)
+ info_->ref();
}
-void Win::set_name(char* name){
- name_ = xbt_strdup(name);
+void Win::set_name(const char* name){
+ name_ = name;
}
int Win::fence(int assert)
{
XBT_DEBUG("Entering fence");
- if (opened_ == 0)
- opened_=1;
- if (assert != MPI_MODE_NOPRECEDE) {
+ opened_++;
+ if (not (assert & MPI_MODE_NOPRECEDE)) {
// This is not the first fence => finalize what came before
- MSG_barrier_wait(bar_);
- xbt_mutex_acquire(mut_);
- // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
- // Without this, the vector could get redimensionned when another process pushes.
- // This would result in the array used by Request::waitall() to be invalidated.
- // Another solution would be to copy the data and cleanup the vector *before* Request::waitall
- std::vector<MPI_Request> *reqs = requests_;
- int size = static_cast<int>(reqs->size());
- // start all requests that have been prepared by another process
- if (size > 0) {
- MPI_Request* treqs = &(*reqs)[0];
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- }
+ bar_->wait();
+ flush_local_all();
count_=0;
- xbt_mutex_release(mut_);
}
- if(assert==MPI_MODE_NOSUCCEED)//there should be no ops after this one, tell we are closed.
+ if (assert & MPI_MODE_NOSUCCEED) // there should be no ops after this one, tell we are closed.
opened_=0;
assert_ = assert;
- MSG_barrier_wait(bar_);
+ bar_->wait();
XBT_DEBUG("Leaving fence");
return MPI_SUCCESS;
}
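
fence() provides the synchronization behind the classic user-level pattern; a minimal sketch in plain MPI, assuming MPI_Init was called and rank/size were queried beforehand:

    enum { N = 1024 };
    int src[N] = {0}, dst_buf[N] = {0};
    MPI_Win win;
    MPI_Win_create(dst_buf, sizeof(dst_buf), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    MPI_Win_fence(0, win);                                           // opens the access epoch
    MPI_Put(src, N, MPI_INT, (rank + 1) % size, 0, N, MPI_INT, win); // RMA inside the epoch
    MPI_Win_fence(0, win);                                           // completes all RMA of the epoch
    MPI_Win_free(&win);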
-int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
-
- if(opened_==0){//check that post/start has been done
- // no fence or start .. lock ok ?
- int locked=0;
- for (auto const& it : recv_win->lockers_)
- if (it == comm_->rank())
- locked = 1;
- if(locked != 1)
- return MPI_ERR_WIN;
- }
+ Win* recv_win = connected_wins_[target_rank];
- if(target_count*target_datatype->get_extent()>recv_win->size_)
- return MPI_ERR_ARG;
+ CHECK_WIN_LOCKED(recv_win)
+ CHECK_RMA_REMOTE_WIN("MPI_Put", recv_win)
- void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
- XBT_DEBUG("Entering MPI_Put to %d", target_rank);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
- if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
- //prepare send_request
+ if (target_rank != rank_) { // This is not for myself, so we need to send messages
+ XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
+ // prepare send_request
MPI_Request sreq =
- // TODO cheinrich Check for rank / pid conversion
- Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(),
- target_rank, SMPI_RMA_TAG + 1, comm_, MPI_OP_NULL);
+ Request::rma_send_init(origin_addr, origin_count, origin_datatype, rank_, target_rank, SMPI_RMA_TAG + 1, comm_,
+ MPI_OP_NULL);
//prepare receiver request
- // TODO cheinrich Check for rank / pid conversion
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
- target_rank, SMPI_RMA_TAG + 1,
- recv_win->comm_, MPI_OP_NULL);
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, rank_, target_rank,
+ SMPI_RMA_TAG + 1, recv_win->comm_, MPI_OP_NULL);
//start send
sreq->start();
if(request!=nullptr){
*request=sreq;
}else{
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
+ mut_->lock();
+ requests_.push_back(sreq);
+ mut_->unlock();
}
//push request to receiver's win
- xbt_mutex_acquire(recv_win->mut_);
- recv_win->requests_->push_back(rreq);
+ recv_win->mut_->lock();
+ recv_win->requests_.push_back(rreq);
rreq->start();
- xbt_mutex_release(recv_win->mut_);
-
- }else{
+ recv_win->mut_->unlock();
+ } else {
+ XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
if(request!=nullptr)
*request = MPI_REQUEST_NULL;
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
-
- if(opened_==0){//check that post/start has been done
- // no fence or start .. lock ok ?
- int locked=0;
- for (auto const& it : send_win->lockers_)
- if (it == comm_->rank())
- locked = 1;
- if(locked != 1)
- return MPI_ERR_WIN;
- }
+ Win* send_win = connected_wins_[target_rank];
- if(target_count*target_datatype->get_extent()>send_win->size_)
- return MPI_ERR_ARG;
+ CHECK_WIN_LOCKED(send_win)
+ CHECK_RMA_REMOTE_WIN("MPI_Get", send_win)
- void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
+ const void* send_addr = static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_;
XBT_DEBUG("Entering MPI_Get from %d", target_rank);
- if(target_rank != comm_->rank()){
+ if (target_rank != rank_) {
//prepare send_request
- MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- target_rank, send_win->comm_->rank(),
+ MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype, target_rank, rank_,
SMPI_RMA_TAG + 2, send_win->comm_, MPI_OP_NULL);
//prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- target_rank, comm_->rank(), // TODO cheinrich Check here if comm_->rank() and above send_win->comm_->rank() are correct
+ MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype, target_rank, rank_,
SMPI_RMA_TAG + 2, comm_, MPI_OP_NULL);
//start the send, with another process than us as sender.
sreq->start();
- //push request to receiver's win
- xbt_mutex_acquire(send_win->mut_);
- send_win->requests_->push_back(sreq);
- xbt_mutex_release(send_win->mut_);
+ // push request to sender's win
+ send_win->mut_->lock();
+ send_win->requests_.push_back(sreq);
+ send_win->mut_->unlock();
//start recv
rreq->start();
if(request!=nullptr){
*request=rreq;
}else{
- xbt_mutex_acquire(mut_);
- requests_->push_back(rreq);
- xbt_mutex_release(mut_);
+ mut_->lock();
+ requests_.push_back(rreq);
+ mut_->unlock();
}
-
- }else{
+ } else {
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
if(request!=nullptr)
*request=MPI_REQUEST_NULL;
}
-
return MPI_SUCCESS;
}
-
-int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
XBT_DEBUG("Entering MPI_Win_Accumulate");
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
-
- if(opened_==0){//check that post/start has been done
- // no fence or start .. lock ok ?
- int locked=0;
- for (auto const& it : recv_win->lockers_)
- if (it == comm_->rank())
- locked = 1;
- if(locked != 1)
- return MPI_ERR_WIN;
- }
- //FIXME: local version
+ Win* recv_win = connected_wins_[target_rank];
- if(target_count*target_datatype->get_extent()>recv_win->size_)
- return MPI_ERR_ARG;
+ //FIXME: local version
+ CHECK_WIN_LOCKED(recv_win)
+ CHECK_RMA_REMOTE_WIN("MPI_Accumulate", recv_win)
- void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
- //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
- //prepare send_request
+ // As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
+ // SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
+ // prepare send_request
- MPI_Request sreq =
- Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(),
- target_rank, SMPI_RMA_TAG - 3 - count_, comm_, op);
+ MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, rank_, target_rank,
+ SMPI_RMA_TAG - 3 - count_, comm_, op);
// prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
- target_rank, SMPI_RMA_TAG - 3 - count_,
- recv_win->comm_, op);
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, rank_, target_rank,
+ SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op);
count_++;
// start send
sreq->start();
// push request to receiver's win
- xbt_mutex_acquire(recv_win->mut_);
- recv_win->requests_->push_back(rreq);
+ recv_win->mut_->lock();
+ recv_win->requests_.push_back(rreq);
rreq->start();
- xbt_mutex_release(recv_win->mut_);
+ recv_win->mut_->unlock();
if (request != nullptr) {
*request = sreq;
- }else{
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
- }
+ } else {
+ mut_->lock();
+ requests_.push_back(sreq);
+ mut_->unlock();
+ }
+ // FIXME: The current implementation fails to ensure the correct ordering of the accumulate requests. The following
+ // 'flush' is a workaround to fix that.
+ flush(target_rank);
XBT_DEBUG("Leaving MPI_Win_Accumulate");
return MPI_SUCCESS;
}
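
MPI requires accumulate operations from one origin to the same target location to apply in program order (under the default accumulate ordering), which is why each operation gets its own tag SMPI_RMA_TAG - 3 - count_ and why the FIXME above resorts to a per-operation flush. A minimal sketch of the case that must not be reordered, assuming an epoch is already open on win and target is a valid rank:

    int one = 1, two = 2;
    MPI_Accumulate(&one, 1, MPI_INT, target, 0, 1, MPI_INT, MPI_REPLACE, win);
    MPI_Accumulate(&two, 1, MPI_INT, target, 0, 1, MPI_INT, MPI_REPLACE, win);
    // after the next synchronization, the target location must hold 2, never 1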
-int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
- int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
- MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
-
+int Win::get_accumulate(const void* origin_addr, int origin_count, MPI_Datatype origin_datatype, void* result_addr,
+ int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
+{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
-
- if(opened_==0){//check that post/start has been done
- // no fence or start .. lock ok ?
- int locked=0;
- for (auto const& it : send_win->lockers_)
- if (it == comm_->rank())
- locked = 1;
- if(locked != 1)
- return MPI_ERR_WIN;
- }
+ const Win* send_win = connected_wins_[target_rank];
- if(target_count*target_datatype->get_extent()>send_win->size_)
- return MPI_ERR_ARG;
+ CHECK_WIN_LOCKED(send_win)
+ CHECK_RMA_REMOTE_WIN("MPI_Get_Accumulate", send_win)
XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
//need to be sure ops are correctly ordered, so finish the request here. This is slow.
- MPI_Request req;
- xbt_mutex_acquire(send_win->atomic_mut_);
+ MPI_Request req = MPI_REQUEST_NULL;
+ send_win->atomic_mut_->lock();
get(result_addr, result_count, result_datatype, target_rank,
target_disp, target_count, target_datatype, &req);
if (req != MPI_REQUEST_NULL)
target_disp, target_count, target_datatype, op, &req);
if (req != MPI_REQUEST_NULL)
Request::wait(&req, MPI_STATUS_IGNORE);
- xbt_mutex_release(send_win->atomic_mut_);
+ send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
-
}
-int Win::compare_and_swap(void *origin_addr, void *compare_addr,
- void *result_addr, MPI_Datatype datatype, int target_rank,
- MPI_Aint target_disp){
+int Win::compare_and_swap(const void* origin_addr, const void* compare_addr, void* result_addr, MPI_Datatype datatype,
+ int target_rank, MPI_Aint target_disp)
+{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
-
- if(opened_==0){//check that post/start has been done
- // no fence or start .. lock ok ?
- int locked=0;
- for (auto const& it : send_win->lockers_)
- if (it == comm_->rank())
- locked = 1;
- if(locked != 1)
- return MPI_ERR_WIN;
- }
+ const Win* send_win = connected_wins_[target_rank];
+
+ CHECK_WIN_LOCKED(send_win)
XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
MPI_Request req = MPI_REQUEST_NULL;
- xbt_mutex_acquire(send_win->atomic_mut_);
+ send_win->atomic_mut_->lock();
get(result_addr, 1, datatype, target_rank,
target_disp, 1, datatype, &req);
if (req != MPI_REQUEST_NULL)
put(origin_addr, 1, datatype, target_rank,
target_disp, 1, datatype);
}
- xbt_mutex_release(send_win->atomic_mut_);
+ send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
}
-int Win::start(MPI_Group group, int assert){
- /* From MPI forum advices
- The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
- will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
- the target process. This still leaves much choice to implementors. The call to MPI_WIN_START can block until the
- matching call to MPI_WIN_POST occurs at all target processes. One can also have implementations where the call to
- MPI_WIN_START is nonblocking, but the call to MPI_PUT blocks until the matching call to MPI_WIN_POST occurred; or
- implementations where the first two calls are nonblocking, but the call to MPI_WIN_COMPLETE blocks until the call
- to MPI_WIN_POST occurred; or even implementations where all three calls can complete before any target process
- called MPI_WIN_POST --- the data put must be buffered, in this last case, so as to allow the put to complete at the
- origin ahead of its completion at the target. However, once the call to MPI_WIN_POST is issued, the sequence above
- must complete, without further dependencies. */
+int Win::start(MPI_Group group, int /*assert*/)
+{
+ /* From MPI forum advice
+ The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
+ will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
+ the target process. This still leaves much choice to implementors. The call to MPI_WIN_START can block until the
+ matching call to MPI_WIN_POST occurs at all target processes. One can also have implementations where the call to
+ MPI_WIN_START is nonblocking, but the call to MPI_PUT blocks until the matching call to MPI_WIN_POST occurred; or
+ implementations where the first two calls are nonblocking, but the call to MPI_WIN_COMPLETE blocks until the call
+ to MPI_WIN_POST occurred; or even implementations where all three calls can complete before any target process
+ called MPI_WIN_POST --- the data put must be buffered, in this last case, so as to allow the put to complete at the
+ origin ahead of its completion at the target. However, once the call to MPI_WIN_POST is issued, the sequence above
+ must complete, without further dependencies. */
//naive, blocking implementation.
- int i = 0;
- int j = 0;
- int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
-
XBT_DEBUG("Entering MPI_Win_Start");
- while (j != size) {
- int src = comm_->group()->rank(group->actor(j));
- if (src != rank_ && src != MPI_UNDEFINED) { // TODO cheinrich: The check of MPI_UNDEFINED should be useless here
- reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, comm_);
- i++;
- }
- j++;
- }
- size=i;
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
- for(i=0;i<size;i++){
- Request::unref(&reqs[i]);
+ std::vector<MPI_Request> reqs;
+ for (int i = 0; i < group->size(); i++) {
+ int src = comm_->group()->rank(group->actor(i));
+ xbt_assert(src != MPI_UNDEFINED);
+ if (src != rank_)
+ reqs.emplace_back(Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, comm_));
}
- xbt_free(reqs);
- opened_++; //we're open for business !
- group_=group;
+ int size = static_cast<int>(reqs.size());
+
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
+ for (auto& req : reqs)
+ Request::unref(&req);
+
group->ref();
+ dst_group_ = group;
+ opened_++; // we're open for business !
XBT_DEBUG("Leaving MPI_Win_Start");
return MPI_SUCCESS;
}
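
Together, start/complete on the origin side and post/wait on the target side implement generalized active-target synchronization; in this naive version, MPI_Win_start blocks until every target has posted. A minimal sketch of the user-level pattern, with the group setup elided and buf, n, target and win assumed:

    if (i_am_origin) {
      MPI_Win_start(target_group, 0, win);  // blocks until the targets called MPI_Win_post
      MPI_Put(buf, n, MPI_INT, target, 0, n, MPI_INT, win);
      MPI_Win_complete(win);                // completes the puts and notifies the targets
    } else {
      MPI_Win_post(origin_group, 0, win);   // opens the exposure epoch
      MPI_Win_wait(win);                    // blocks until all origins called MPI_Win_complete
    }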
-int Win::post(MPI_Group group, int assert){
+int Win::post(MPI_Group group, int /*assert*/)
+{
//let's make a synchronous send here
- int i = 0;
- int j = 0;
- int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
-
XBT_DEBUG("Entering MPI_Win_Post");
- while(j!=size){
- int dst = comm_->group()->rank(group->actor(j));
- if (dst != rank_ && dst != MPI_UNDEFINED) {
- reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, comm_);
- i++;
- }
- j++;
+ std::vector<MPI_Request> reqs;
+ for (int i = 0; i < group->size(); i++) {
+ int dst = comm_->group()->rank(group->actor(i));
+ xbt_assert(dst != MPI_UNDEFINED);
+ if (dst != rank_)
+ reqs.emplace_back(Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 4, comm_));
}
- size=i;
+ int size = static_cast<int>(reqs.size());
+
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
+ for (auto& req : reqs)
+ Request::unref(&req);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
- for(i=0;i<size;i++){
- Request::unref(&reqs[i]);
- }
- xbt_free(reqs);
- opened_++; //we're open for business !
- group_=group;
group->ref();
+ src_group_ = group;
+ opened_++; // we're open for business !
XBT_DEBUG("Leaving MPI_Win_Post");
return MPI_SUCCESS;
}
int Win::complete(){
- if(opened_==0)
- xbt_die("Complete called on already opened MPI_Win");
+ xbt_assert(opened_ != 0, "Complete called on an MPI_Win that was not opened");
XBT_DEBUG("Entering MPI_Win_Complete");
- int i = 0;
- int j = 0;
- int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
-
- while(j!=size){
- int dst = comm_->group()->rank(group_->actor(j));
- if (dst != rank_ && dst != MPI_UNDEFINED) {
- reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, comm_);
- i++;
- }
- j++;
+ std::vector<MPI_Request> reqs;
+ for (int i = 0; i < dst_group_->size(); i++) {
+ int dst = comm_->group()->rank(dst_group_->actor(i));
+ xbt_assert(dst != MPI_UNDEFINED);
+ if (dst != rank_)
+ reqs.emplace_back(Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 5, comm_));
}
- size=i;
- XBT_DEBUG("Win_complete - Sending sync messages to %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ int size = static_cast<int>(reqs.size());
- for(i=0;i<size;i++){
- Request::unref(&reqs[i]);
- }
- xbt_free(reqs);
+ XBT_DEBUG("Win_complete - Sending sync messages to %d processes", size);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
+ for (auto& req : reqs)
+ Request::unref(&req);
- int finished = finish_comms();
- XBT_DEBUG("Win_complete - Finished %d RMA calls", finished);
+ flush_local_all();
- Group::unref(group_);
opened_--; //we're closed for business !
+ Group::unref(dst_group_);
+ dst_group_ = MPI_GROUP_NULL;
return MPI_SUCCESS;
}
int Win::wait(){
//naive, blocking implementation.
XBT_DEBUG("Entering MPI_Win_Wait");
- int i = 0;
- int j = 0;
- int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
-
- while(j!=size){
- int src = comm_->group()->rank(group_->actor(j));
- if (src != rank_ && src != MPI_UNDEFINED) {
- reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, comm_);
- i++;
- }
- j++;
+ std::vector<MPI_Request> reqs;
+ for (int i = 0; i < src_group_->size(); i++) {
+ int src = comm_->group()->rank(src_group_->actor(i));
+ xbt_assert(src != MPI_UNDEFINED);
+ if (src != rank_)
+ reqs.emplace_back(Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 5, comm_));
}
- size=i;
+ int size = static_cast<int>(reqs.size());
+
XBT_DEBUG("Win_wait - Receiving sync messages from %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
- for(i=0;i<size;i++){
- Request::unref(&reqs[i]);
- }
- xbt_free(reqs);
- int finished = finish_comms();
- XBT_DEBUG("Win_wait - Finished %d RMA calls", finished);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
+ for (auto& req : reqs)
+ Request::unref(&req);
- Group::unref(group_);
- opened_--; //we're opened for business !
+ flush_local_all();
+
+ opened_--; //we're closed for business !
+ Group::unref(src_group_);
+ src_group_ = MPI_GROUP_NULL;
return MPI_SUCCESS;
}
-int Win::lock(int lock_type, int rank, int assert){
+int Win::lock(int lock_type, int rank, int /*assert*/)
+{
MPI_Win target_win = connected_wins_[rank];
if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
- xbt_mutex_acquire(target_win->lock_mut_);
+ target_win->lock_mut_->lock();
target_win->mode_ += lock_type; // add the lock_type to differentiate the case where we are switching from EXCLUSIVE to SHARED (no release needed in the unlock)
if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
- xbt_mutex_release(target_win->lock_mut_);
+ target_win->lock_mut_->unlock();
}
} else if (not(target_win->mode_ == MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
target_win->mode_ += lock_type; // don't set to exclusive if it's already shared
- target_win->lockers_.push_back(comm_->rank());
+ target_win->lockers_.push_back(rank_);
- int finished = finish_comms(rank);
- XBT_DEBUG("Win_lock %d - Finished %d RMA calls", rank, finished);
- finished = target_win->finish_comms(rank_);
- XBT_DEBUG("Win_lock target %d - Finished %d RMA calls", rank, finished);
+ flush(rank);
return MPI_SUCCESS;
}
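
An exclusive lock holds the target's lock_mut_ until the matching unlock, while a shared lock only blocks on it (and releases it right away) when the window is currently locked exclusively; mode_ records which case unlock() must undo. The passive-target epoch this enables, as a minimal sketch with buf, n, target and win assumed:

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
    MPI_Put(buf, n, MPI_INT, target, 0, n, MPI_INT, win);
    MPI_Win_unlock(target, win);   // completes the put at both origin and target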
int Win::lock_all(int assert){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
- int ret = this->lock(MPI_LOCK_SHARED, i, assert);
- if(ret != MPI_SUCCESS)
- retval = ret;
+ for (int i = 0; i < comm_->size(); i++) {
+ int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+ if (ret != MPI_SUCCESS)
+ retval = ret;
}
return retval;
}
MPI_Win target_win = connected_wins_[rank];
int target_mode = target_win->mode_;
target_win->mode_= 0;
- target_win->lockers_.remove(comm_->rank());
+ target_win->lockers_.remove(rank_);
if (target_mode==MPI_LOCK_EXCLUSIVE){
- xbt_mutex_release(target_win->lock_mut_);
+ target_win->lock_mut_->unlock();
}
- int finished = finish_comms(rank);
- XBT_DEBUG("Win_unlock %d - Finished %d RMA calls", rank, finished);
- finished = target_win->finish_comms(rank_);
- XBT_DEBUG("Win_unlock target %d - Finished %d RMA calls", rank, finished);
+ flush(rank);
return MPI_SUCCESS;
}
int Win::unlock_all(){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
- int ret = this->unlock(i);
- if(ret != MPI_SUCCESS)
- retval = ret;
+ for (int i = 0; i < comm_->size(); i++) {
+ int ret = this->unlock(i);
+ if (ret != MPI_SUCCESS)
+ retval = ret;
}
return retval;
}
int Win::flush(int rank){
- MPI_Win target_win = connected_wins_[rank];
int finished = finish_comms(rank);
- XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
- finished = target_win->finish_comms(rank_);
- XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
+ XBT_DEBUG("Win_flush on local %d for remote %d - Finished %d RMA calls", rank_, rank, finished);
+ if (rank != rank_) {
+ finished = connected_wins_[rank]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush on remote %d for local %d - Finished %d RMA calls", rank, rank_, finished);
+ }
return MPI_SUCCESS;
}
int Win::flush_local(int rank){
int finished = finish_comms(rank);
- XBT_DEBUG("Win_flush_local for rank %d - Finished %d RMA calls", rank, finished);
+ XBT_DEBUG("Win_flush_local on local %d for remote %d - Finished %d RMA calls", rank_, rank, finished);
return MPI_SUCCESS;
}
int Win::flush_all(){
- int i=0;
- int finished = 0;
- finished = finish_comms();
- XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
- for (i=0; i<comm_->size();i++){
- finished = connected_wins_[i]->finish_comms(rank_);
- XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
+ int finished = finish_comms();
+ XBT_DEBUG("Win_flush_all on local %d - Finished %d RMA calls", rank_, finished);
+ for (int i = 0; i < comm_->size(); i++) {
+ if (i != rank_) {
+ finished = connected_wins_[i]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush_all on remote %d for local %d - Finished %d RMA calls", i, rank_, finished);
+ }
}
return MPI_SUCCESS;
}
int Win::flush_local_all(){
int finished = finish_comms();
- XBT_DEBUG("Win_flush_local_all - Finished %d RMA calls", finished);
+ XBT_DEBUG("Win_flush_local_all on local %d - Finished %d RMA calls", rank_, finished);
return MPI_SUCCESS;
}
return static_cast<Win*>(F2C::f2c(id));
}
-
int Win::finish_comms(){
- xbt_mutex_acquire(mut_);
+ // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
+ // Without this, the vector could get redimensioned when another process pushes.
+ // This would result in the array used by Request::waitall() to be invalidated.
+ // Another solution would be to copy the data and clean up the vector *before* Request::waitall
+ mut_->lock();
//Finish own requests
- std::vector<MPI_Request> *reqqs = requests_;
- int size = static_cast<int>(reqqs->size());
+ int size = static_cast<int>(requests_.size());
if (size > 0) {
- MPI_Request* treqs = &(*reqqs)[0];
+ MPI_Request* treqs = requests_.data();
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- reqqs->clear();
+ requests_.clear();
}
- xbt_mutex_release(mut_);
+ mut_->unlock();
return size;
}
int Win::finish_comms(int rank){
- xbt_mutex_acquire(mut_);
- //Finish own requests
- std::vector<MPI_Request> *reqqs = requests_;
- int size = static_cast<int>(reqqs->size());
+ // See comment about the mutex in finish_comms() above
+ mut_->lock();
+ // Finish own requests
+ // Keep only the requests where we are either the sender or the destination,
+ // since those are the only ones we are responsible for waiting on.
+ // Also compare against the process id, since src() and dst() on a request
+ // return process ids, NOT ranks (a rank only exists within a communicator).
+ aid_t proc_id = comm_->group()->actor(rank);
+ auto it = std::stable_partition(begin(requests_), end(requests_), [proc_id](const MPI_Request& req) {
+ return (req == MPI_REQUEST_NULL || (req->src() != proc_id && req->dst() != proc_id));
+ });
+ std::vector<MPI_Request> myreqqs(it, end(requests_));
+ requests_.erase(it, end(requests_));
+ int size = static_cast<int>(myreqqs.size());
if (size > 0) {
- size = 0;
- std::vector<MPI_Request> myreqqs;
- std::vector<MPI_Request>::iterator iter = reqqs->begin();
- while (iter != reqqs->end()){
- if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
- myreqqs.push_back(*iter);
- iter = reqqs->erase(iter);
- size++;
- } else {
- ++iter;
- }
- }
- if(size >0){
- MPI_Request* treqs = &myreqqs[0];
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- myreqqs.clear();
- }
+ MPI_Request* treqs = myreqqs.data();
+ Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+ myreqqs.clear();
}
- xbt_mutex_release(mut_);
+ mut_->unlock();
return size;
}
-
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const
+{
+ const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+ for (int i = 0; not target_win && i < comm_->size(); i++) {
+ if (connected_wins_[i]->size_ > 0)
+ target_win = connected_wins_[i];
+ }
+ if (target_win) {
+ *size = target_win->size_;
+ *disp_unit = target_win->disp_unit_;
+ *static_cast<void**>(baseptr) = target_win->base_;
+ } else {
+ *size = 0;
+ *static_cast<void**>(baseptr) = nullptr;
+ }
+ return MPI_SUCCESS;
}
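
This backs MPI_Win_shared_query. Per the standard, rank MPI_PROC_NULL asks about "any" rank, and this implementation then reports the lowest rank whose window has a non-zero size. A minimal usage sketch, with illustrative variable names:

    MPI_Aint seg_size;
    int disp_unit;
    int* remote_base;
    MPI_Win_shared_query(win, MPI_PROC_NULL, &seg_size, &disp_unit, &remote_base);
    if (seg_size > 0)
      remote_base[0] = 42;   // direct load/store access to the queried segment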
+
+MPI_Errhandler Win::errhandler()
+{
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
+ return errhandler_;
}
+
+void Win::set_errhandler(MPI_Errhandler errhandler)
+{
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
+ errhandler_ = errhandler;
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
+}
+} // namespace smpi
+} // namespace simgrid