-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_win.hpp"
+
#include "private.hpp"
#include "smpi_coll.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_info.hpp"
#include "smpi_keyvals.hpp"
-#include "smpi_process.hpp"
#include "smpi_request.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
-using simgrid::s4u::Actor;
namespace simgrid{
namespace smpi{
opened_ = 0;
group_ = MPI_GROUP_NULL;
requests_ = new std::vector<MPI_Request>();
- mut_ = xbt_mutex_init();
- lock_mut_ = xbt_mutex_init();
- atomic_mut_ = xbt_mutex_init();
+ mut_ = s4u::Mutex::create();
+ lock_mut_ = s4u::Mutex::create();
+ atomic_mut_ = s4u::Mutex::create();
connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
count_ = 0;
if(rank_==0){
- bar_ = MSG_barrier_init(comm_size);
+ bar_ = new s4u::Barrier(comm_size);
}
mode_=0;
comm->add_rma_win(this);
+ comm->ref();
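+ // Exchange window handles: after this allgather, connected_wins_[i] points to
+ // the Win object of rank i, so peers can reach each other's window directly.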
Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
MPI_BYTE, comm);
- Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
+ Colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
Colls::barrier(comm);
}
Win::~Win(){
//As per the standard, perform a barrier to ensure every async comm is finished
- MSG_barrier_wait(bar_);
+ bar_->wait();
int finished = finish_comms();
XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
comm_->remove_rma_win(this);
Colls::barrier(comm_);
+ Comm::unref(comm_);
+
if (rank_ == 0)
- MSG_barrier_destroy(bar_);
- xbt_mutex_destroy(mut_);
- xbt_mutex_destroy(lock_mut_);
- xbt_mutex_destroy(atomic_mut_);
+ delete bar_;
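+ // Only free the memory if it was allocated by us (as with MPI_Win_allocate)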
if(allocated_ !=0)
xbt_free(base_);
opened_=1;
if (assert != MPI_MODE_NOPRECEDE) {
// This is not the first fence => finalize what came before
- MSG_barrier_wait(bar_);
- xbt_mutex_acquire(mut_);
+ bar_->wait();
+ mut_->lock();
// This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
-// Without this, the vector could get redimensionned when another process pushes.
-// This would result in the array used by Request::waitall() to be invalidated.
+// Without this, the vector could get resized (reallocating its storage) when another process pushes,
+// which would invalidate the array used by Request::waitall().
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
}
count_=0;
- xbt_mutex_release(mut_);
+ mut_->unlock();
}
-if(assert==MPI_MODE_NOSUCCEED)//there should be no ops after this one, tell we are closed.
+if (assert == MPI_MODE_NOSUCCEED) // no operation should follow this one: mark the window as closed
opened_=0;
assert_ = assert;
- MSG_barrier_wait(bar_);
+ bar_->wait();
XBT_DEBUG("Leaving fence");
return MPI_SUCCESS;
return MPI_ERR_ARG;
-void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
- XBT_DEBUG("Entering MPI_Put to %d", target_rank);
if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
+ XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
// prepare send_request
MPI_Request sreq =
// TODO cheinrich Check for rank / pid conversion
if(request!=nullptr){
*request=sreq;
}else{
- xbt_mutex_acquire(mut_);
+ mut_->lock();
requests_->push_back(sreq);
- xbt_mutex_release(mut_);
+ mut_->unlock();
}
//push request to receiver's win
- xbt_mutex_acquire(recv_win->mut_);
+ recv_win->mut_->lock();
recv_win->requests_->push_back(rreq);
rreq->start();
- xbt_mutex_release(recv_win->mut_);
+ recv_win->mut_->unlock();
}else{
+ XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
if(request!=nullptr)
*request = MPI_REQUEST_NULL;
-//start the send, with another process than us as sender.
+// Start the send, with a process other than us as the sender.
sreq->start();
//push request to receiver's win
- xbt_mutex_acquire(send_win->mut_);
+ send_win->mut_->lock();
send_win->requests_->push_back(sreq);
- xbt_mutex_release(send_win->mut_);
+ send_win->mut_->unlock();
//start recv
rreq->start();
if(request!=nullptr){
*request=rreq;
}else{
- xbt_mutex_acquire(mut_);
+ mut_->lock();
requests_->push_back(rreq);
- xbt_mutex_release(mut_);
+ mut_->unlock();
}
}else{
// prepare receiver request
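+ // target_rank is a rank of comm_; translate it into the matching rank in the
+ // receiver's communicator by going through the underlying actor.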
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
- target_rank, SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op);
+ recv_win->comm_->group()->rank(comm_->group()->actor(target_rank)),
+ SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op);
count_++;
// start send
sreq->start();
// push request to receiver's win
- xbt_mutex_acquire(recv_win->mut_);
+ recv_win->mut_->lock();
recv_win->requests_->push_back(rreq);
rreq->start();
- xbt_mutex_release(recv_win->mut_);
+ recv_win->mut_->unlock();
if (request != nullptr) {
*request = sreq;
} else {
- xbt_mutex_acquire(mut_);
+ mut_->lock();
requests_->push_back(sreq);
- xbt_mutex_release(mut_);
+ mut_->unlock();
}
XBT_DEBUG("Leaving MPI_Win_Accumulate");
XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
-//need to be sure ops are correctly ordered, so finish request here ? slow.
+// Ops need to be correctly ordered, so we have to finish the request here; this is slow.
MPI_Request req;
- xbt_mutex_acquire(send_win->atomic_mut_);
+ send_win->atomic_mut_->lock();
get(result_addr, result_count, result_datatype, target_rank,
target_disp, target_count, target_datatype, &req);
if (req != MPI_REQUEST_NULL)
target_disp, target_count, target_datatype, op, &req);
if (req != MPI_REQUEST_NULL)
Request::wait(&req, MPI_STATUS_IGNORE);
- xbt_mutex_release(send_win->atomic_mut_);
+ send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
}
XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
MPI_Request req = MPI_REQUEST_NULL;
- xbt_mutex_acquire(send_win->atomic_mut_);
+ send_win->atomic_mut_->lock();
get(result_addr, 1, datatype, target_rank,
target_disp, 1, datatype, &req);
if (req != MPI_REQUEST_NULL)
put(origin_addr, 1, datatype, target_rank,
target_disp, 1, datatype);
}
- xbt_mutex_release(send_win->atomic_mut_);
+ send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
}
MPI_Win target_win = connected_wins_[rank];
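+ // Take the lock mutex when an exclusive lock is requested (unless the window is
+ // already locked in shared mode), or when the window is currently locked exclusively.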
if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
- xbt_mutex_acquire(target_win->lock_mut_);
+ target_win->lock_mut_->lock();
-target_win->mode_+= lock_type;//add the lock_type to differentiate case when we are switching from EXCLUSIVE to SHARED (no release needed in the unlock)
+// Add the lock_type to differentiate the case where we switch from EXCLUSIVE to SHARED
+// (then no release is needed in the unlock).
+target_win->mode_ += lock_type;
if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
- xbt_mutex_release(target_win->lock_mut_);
+ target_win->lock_mut_->unlock();
}
} else if (not(target_win->mode_ == MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
target_win->mode_ += lock_type; // don't set to exclusive if it's already shared
target_win->mode_= 0;
target_win->lockers_.remove(comm_->rank());
if (target_mode==MPI_LOCK_EXCLUSIVE){
- xbt_mutex_release(target_win->lock_mut_);
+ target_win->lock_mut_->unlock();
}
int finished = finish_comms(rank);
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
+ for (int i = 0; i < comm_->size(); i++) {
- int ret = this->unlock(i);
- if(ret != MPI_SUCCESS)
- retval = ret;
+ int ret = this->unlock(i);
+ if (ret != MPI_SUCCESS)
+ retval = ret;
}
return retval;
}
int Win::flush(int rank){
MPI_Win target_win = connected_wins_[rank];
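+ // Complete the pending requests registered on our own window first,
+ // then the ones registered on the target's window.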
- int finished = finish_comms(rank);
+ int finished = finish_comms(rank_);
XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
- finished = target_win->finish_comms(rank_);
+ finished = target_win->finish_comms(rank);
XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
return MPI_SUCCESS;
}
}
int Win::flush_all(){
- int i=0;
- int finished = 0;
- finished = finish_comms();
+ int finished = finish_comms();
XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
- for (i=0; i<comm_->size();i++){
+ for (int i = 0; i < comm_->size(); i++) {
finished = connected_wins_[i]->finish_comms(rank_);
XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
}
return static_cast<Win*>(F2C::f2c(id));
}
-
int Win::finish_comms(){
- xbt_mutex_acquire(mut_);
+ mut_->lock();
//Finish own requests
std::vector<MPI_Request> *reqqs = requests_;
int size = static_cast<int>(reqqs->size());
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
reqqs->clear();
}
- xbt_mutex_release(mut_);
+ mut_->unlock();
return size;
}
int Win::finish_comms(int rank){
- xbt_mutex_acquire(mut_);
+ mut_->lock();
//Finish own requests
std::vector<MPI_Request> *reqqs = requests_;
int size = static_cast<int>(reqqs->size());
size = 0;
std::vector<MPI_Request> myreqqs;
std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ int proc_id = comm_->group()->actor(rank)->get_pid();
while (iter != reqqs->end()){
- if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
+ // Let's see if we're either the destination or the sender of this request
+ // because we only wait for requests that we are responsible for.
+ // Also use the process id here since the request itself returns from src()
+ // and dst() the process id, NOT the rank (which only exists in the context of a communicator).
+ if (((*iter) != MPI_REQUEST_NULL) && (((*iter)->src() == proc_id) || ((*iter)->dst() == proc_id))) {
myreqqs.push_back(*iter);
iter = reqqs->erase(iter);
size++;
myreqqs.clear();
}
}
- xbt_mutex_release(mut_);
+ mut_->unlock();
return size;
}
-
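+// Implements MPI_Win_shared_query: returns the size, displacement unit, and base
+// address of the memory attached to the window of the given rank. With
+// MPI_PROC_NULL, fall back to the lowest rank that attached a non-empty segment.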
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+{
+ MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+ for (int i = 0; not target_win && i < comm_->size(); i++) {
+ if (connected_wins_[i]->size_ > 0)
+ target_win = connected_wins_[i];
+ }
+ if (target_win) {
+ *size = target_win->size_;
+ *disp_unit = target_win->disp_unit_;
+ *static_cast<void**>(baseptr) = target_win->base_;
+ } else {
+ *size = 0;
+ *static_cast<void**>(baseptr) = xbt_malloc(0);
+ }
+ return MPI_SUCCESS;
+}
}
}