-/* Copyright (c) 2007-2020. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_request.hpp"
#include "src/smpi/include/smpi_actor.hpp"
+#include <algorithm>
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
, disp_unit_(disp_unit)
, info_(info)
, comm_(comm)
+ , connected_wins_(comm->size())
, rank_(comm->rank())
, allocated_(allocated)
, dynamic_(dynamic)
XBT_DEBUG("Creating window");
if(info!=MPI_INFO_NULL)
info->ref();
- int comm_size = comm->size();
- name_ = nullptr;
- opened_ = 0;
- group_ = MPI_GROUP_NULL;
- requests_ = new std::vector<MPI_Request>();
- mut_ = s4u::Mutex::create();
- lock_mut_ = s4u::Mutex::create();
- atomic_mut_ = s4u::Mutex::create();
- connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
- count_ = 0;
if(rank_==0){
- bar_ = new s4u::Barrier(comm_size);
+ bar_ = new s4u::Barrier(comm->size());
}
- mode_=0;
- errhandler_=MPI_ERRORS_ARE_FATAL;
errhandler_->ref();
comm->add_rma_win(this);
comm->ref();
- colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win), MPI_BYTE,
- comm);
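+ // Gather every rank's Win pointer into connected_wins_, so that RMA calls can
+ // reach the target window directly by indexing with the target rank.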
+ colls::allgather(&connected_wins_[rank_], sizeof(MPI_Win), MPI_BYTE, connected_wins_.data(), sizeof(MPI_Win),
+ MPI_BYTE, comm);
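+ // Rank 0 then broadcasts its Barrier pointer: sharing a raw pointer between ranks
+ // works here because SMPI runs all simulated processes in the same address space.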
colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
int finished = finish_comms();
XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
- delete requests_;
- delete[] connected_wins_;
- if (name_ != nullptr){
- xbt_free(name_);
- }
if (info_ != MPI_INFO_NULL)
simgrid::smpi::Info::unref(info_);
if (errhandler_ != MPI_ERRHANDLER_NULL)
int Win::attach(void* /*base*/, MPI_Aint size)
{
- if (not(base_ == MPI_BOTTOM || base_ == 0))
+ if (not(base_ == MPI_BOTTOM || base_ == nullptr))
return MPI_ERR_ARG;
- base_=0;//actually the address will be given in the RMA calls, as being the disp.
+ base_ = nullptr; // the actual address will be given in each RMA call, as the displacement.
size_+=size;
return MPI_SUCCESS;
}
void Win::get_name(char* name, int* length) const
{
- if(name_==nullptr){
- *length=0;
- name=nullptr;
- return;
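+ // The caller must provide a buffer of at least MPI_MAX_OBJECT_NAME chars, so
+ // copying the name plus its '\0' terminator is safe.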
+ *length = static_cast<int>(name_.length());
+ if (not name_.empty()) {
+ name_.copy(name, *length);
+ name[*length] = '\0';
}
- *length = strlen(name_);
- strncpy(name, name_, *length+1);
}
void Win::get_group(MPI_Group* group){
}
void Win::set_name(const char* name){
- name_ = xbt_strdup(name);
+ name_ = name;
}
int Win::fence(int assert)
// Without this, the vector could get resized while another process pushes to it,
// which would invalidate the array used by Request::waitall().
// Another solution would be to copy the data and clean up the vector *before* Request::waitall
- std::vector<MPI_Request> *reqs = requests_;
- int size = static_cast<int>(reqs->size());
+
// start all requests that have been prepared by another process
- if (size > 0) {
- MPI_Request* treqs = &(*reqs)[0];
+ if (not requests_.empty()) {
+ int size = static_cast<int>(requests_.size());
+ MPI_Request* treqs = requests_.data();
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
}
count_=0;
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
- const Win* recv_win = connected_wins_[target_rank];
+ Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
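+ // Target address inside the remote window: base + displacement * displacement unit.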
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
*request=sreq;
}else{
mut_->lock();
- requests_->push_back(sreq);
+ requests_.push_back(sreq);
mut_->unlock();
}
//push request to receiver's win
recv_win->mut_->lock();
- recv_win->requests_->push_back(rreq);
+ recv_win->requests_.push_back(rreq);
rreq->start();
recv_win->mut_->unlock();
} else {
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
- const Win* send_win = connected_wins_[target_rank];
+ Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
sreq->start();
//push request to receiver's win
send_win->mut_->lock();
- send_win->requests_->push_back(sreq);
+ send_win->requests_.push_back(sreq);
send_win->mut_->unlock();
//start recv
*request=rreq;
}else{
mut_->lock();
- requests_->push_back(rreq);
+ requests_.push_back(rreq);
mut_->unlock();
}
} else {
{
XBT_DEBUG("Entering MPI_Win_Accumulate");
//get receiver pointer
- const Win* recv_win = connected_wins_[target_rank];
+ Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
// As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
// SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
sreq->start();
// push request to receiver's win
recv_win->mut_->lock();
- recv_win->requests_->push_back(rreq);
+ recv_win->requests_.push_back(rreq);
rreq->start();
recv_win->mut_->unlock();
*request = sreq;
} else {
mut_->lock();
- requests_->push_back(sreq);
+ requests_.push_back(sreq);
mut_->unlock();
}
int i = 0;
int j = 0;
int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
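+ // value-initialization fills the vector with null request handles, matching the
+ // zero-fill previously done by xbt_new0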
+ std::vector<MPI_Request> reqs(size);
XBT_DEBUG("Entering MPI_Win_Start");
while (j != size) {
j++;
}
size = i;
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for (i = 0; i < size; i++) {
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
opened_++; // we're open for business!
group_=group;
group->ref();
int i = 0;
int j = 0;
int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
}
size=i;
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
opened_++; // we're open for business!
group_=group;
group->ref();
int i = 0;
int j = 0;
int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
while(j!=size){
int dst = comm_->group()->rank(group_->actor(j));
}
size=i;
XBT_DEBUG("Win_complete - Sending sync messages to %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
int finished = finish_comms();
XBT_DEBUG("Win_complete - Finished %d RMA calls", finished);
int i = 0;
int j = 0;
int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
while(j!=size){
int src = comm_->group()->rank(group_->actor(j));
}
size=i;
XBT_DEBUG("Win_wait - Receiving sync messages from %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
int finished = finish_comms();
XBT_DEBUG("Win_wait - Finished %d RMA calls", finished);
}
int Win::lock_all(int assert){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
- int ret = this->lock(MPI_LOCK_SHARED, i, assert);
- if(ret != MPI_SUCCESS)
- retval = ret;
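+ // Take a shared lock on every rank in turn; if any lock fails, remember the last error.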
+ for (int i = 0; i < comm_->size(); i++) {
+ int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+ if (ret != MPI_SUCCESS)
+ retval = ret;
}
return retval;
}
}
int Win::unlock_all(){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
+ for (int i = 0; i < comm_->size(); i++) {
int ret = this->unlock(i);
if (ret != MPI_SUCCESS)
retval = ret;
int Win::finish_comms(){
mut_->lock();
//Finish own requests
- std::vector<MPI_Request> *reqqs = requests_;
- int size = static_cast<int>(reqqs->size());
+ int size = static_cast<int>(requests_.size());
if (size > 0) {
- MPI_Request* treqs = &(*reqqs)[0];
+ MPI_Request* treqs = requests_.data();
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- reqqs->clear();
+ requests_.clear();
}
mut_->unlock();
return size;
int Win::finish_comms(int rank){
mut_->lock();
- //Finish own requests
- std::vector<MPI_Request> *reqqs = requests_;
- int size = static_cast<int>(reqqs->size());
+ // Finish own requests
+ // Keep only the requests for which we are either the sender or the destination,
+ // since those are the ones we are responsible for waiting on.
+ // Compare process ids here: src() and dst() on a request return process ids,
+ // NOT ranks (ranks only exist in the context of a communicator).
+ int proc_id = comm_->group()->actor(rank)->get_pid();
+ auto it = std::stable_partition(begin(requests_), end(requests_), [proc_id](const MPI_Request& req) {
+ return (req == MPI_REQUEST_NULL || (req->src() != proc_id && req->dst() != proc_id));
+ });
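+ // The requests involving proc_id are now grouped at the back of requests_:
+ // move them into myreqqs to be waited on, and erase them from requests_.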
+ std::vector<MPI_Request> myreqqs(it, end(requests_));
+ requests_.erase(it, end(requests_));
+ int size = static_cast<int>(myreqqs.size());
if (size > 0) {
- size = 0;
- std::vector<MPI_Request> myreqqs;
- std::vector<MPI_Request>::iterator iter = reqqs->begin();
- int proc_id = comm_->group()->actor(rank)->get_pid();
- while (iter != reqqs->end()){
- // Let's see if we're either the destination or the sender of this request
- // because we only wait for requests that we are responsible for.
- // Also use the process id here since the request itself returns from src()
- // and dst() the process id, NOT the rank (which only exists in the context of a communicator).
- if (((*iter) != MPI_REQUEST_NULL) && (((*iter)->src() == proc_id) || ((*iter)->dst() == proc_id))) {
- myreqqs.push_back(*iter);
- iter = reqqs->erase(iter);
- size++;
- } else {
- ++iter;
- }
- }
- if(size >0){
- MPI_Request* treqs = &myreqqs[0];
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- myreqqs.clear();
- }
+ MPI_Request* treqs = myreqqs.data();
+ Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+ myreqqs.clear();
}
mut_->unlock();
return size;
}
-int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const
{
const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
for (int i = 0; not target_win && i < comm_->size(); i++) {