-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2020. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
std::unordered_map<int, smpi_key_elem> Win::keyvals_;
int Win::keyval_id_=0;
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
- int comm_size = comm->size();
- rank_ = comm->rank();
+Win::Win(void* base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic)
+ : base_(base)
+ , size_(size)
+ , disp_unit_(disp_unit)
+ , info_(info)
+ , comm_(comm)
+ , rank_(comm->rank())
+ , allocated_(allocated)
+ , dynamic_(dynamic)
+{
XBT_DEBUG("Creating window");
if(info!=MPI_INFO_NULL)
info->ref();
+ int comm_size = comm->size();
name_ = nullptr;
opened_ = 0;
group_ = MPI_GROUP_NULL;
bar_ = new s4u::Barrier(comm_size);
}
mode_=0;
-
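+ // Windows start with the default MPI_ERRORS_ARE_FATAL handler; error handlers are refcounted, so take a reference.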
+ errhandler_ = MPI_ERRORS_ARE_FATAL;
+ errhandler_->ref();
comm->add_rma_win(this);
comm->ref();
- Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
- MPI_BYTE, comm);
+ colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win), MPI_BYTE,
+ comm);
- Colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
+ colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
- Colls::barrier(comm);
+ colls::barrier(comm);
}
Win::~Win(){
if (name_ != nullptr){
xbt_free(name_);
}
- if(info_!=MPI_INFO_NULL){
- MPI_Info_free(&info_);
- }
+ if (info_ != MPI_INFO_NULL)
+ simgrid::smpi::Info::unref(info_);
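+ // Drop the reference we hold on the error handler (MPI_ERRORS_ARE_FATAL by default).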
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
comm_->remove_rma_win(this);
- Colls::barrier(comm_);
+ colls::barrier(comm_);
Comm::unref(comm_);
if (rank_ == 0)
cleanup_attr<Win>();
}
-int Win::attach (void *base, MPI_Aint size){
+int Win::attach(void* /*base*/, MPI_Aint size)
+{
if (not(base_ == MPI_BOTTOM || base_ == 0))
return MPI_ERR_ARG;
base_=0;//the actual address will be given in the RMA calls, as the displacement.
return MPI_SUCCESS;
}
-int Win::detach (void *base){
+int Win::detach(const void* /*base*/)
+{
base_=MPI_BOTTOM;
size_=-1;
return MPI_SUCCESS;
}
}
-MPI_Info Win::info(){
- if(info_== MPI_INFO_NULL)
+MPI_Info Win::info()
+{
+ if (info_ == MPI_INFO_NULL)
info_ = new Info();
info_->ref();
return info_;
return dynamic_;
}
-void Win::set_info(MPI_Info info){
- if(info_!= MPI_INFO_NULL)
- info->ref();
- info_=info;
+void Win::set_info(MPI_Info info)
+{
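+ // Release the previously attached info object before taking a reference on the new one.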
+ if (info_ != MPI_INFO_NULL)
+ simgrid::smpi::Info::unref(info_);
+ info_ = info;
+ if (info_ != MPI_INFO_NULL)
+ info_->ref();
}
-void Win::set_name(char* name){
+void Win::set_name(const char* name){
name_ = xbt_strdup(name);
}
bar_->wait();
mut_->lock();
// This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
- // Without this, the vector could get redimensionned when another process pushes.
+ // Without this, the vector could get resized when another process pushes.
// This would result in the array used by Request::waitall() to be invalidated.
// Another solution would be to copy the data and cleanup the vector *before* Request::waitall
std::vector<MPI_Request> *reqs = requests_;
return MPI_SUCCESS;
}
-int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
+ const Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
recv_win->requests_->push_back(rreq);
rreq->start();
recv_win->mut_->unlock();
-
- }else{
+ } else {
XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
if(request!=nullptr)
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
if(target_count*target_datatype->get_extent()>send_win->size_)
return MPI_ERR_ARG;
- void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
+ const void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
XBT_DEBUG("Entering MPI_Get from %d", target_rank);
if(target_rank != comm_->rank()){
requests_->push_back(rreq);
mut_->unlock();
}
-
- }else{
+ } else {
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
if(request!=nullptr)
*request=MPI_REQUEST_NULL;
}
-
return MPI_SUCCESS;
}
-
-int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
XBT_DEBUG("Entering MPI_Win_Accumulate");
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
+ const Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
- //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
- //prepare send_request
+ // As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
+ // SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
+ // prepare send_request
MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), target_rank,
SMPI_RMA_TAG - 3 - count_, comm_, op);
return MPI_SUCCESS;
}
-int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
- int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
- MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
-
+int Win::get_accumulate(const void* origin_addr, int origin_count, MPI_Datatype origin_datatype, void* result_addr,
+ int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
+{
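+ // The request parameter is intentionally unnamed: it is not used by this implementation.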
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
Request::wait(&req, MPI_STATUS_IGNORE);
send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
-
}
-int Win::compare_and_swap(void *origin_addr, void *compare_addr,
- void *result_addr, MPI_Datatype datatype, int target_rank,
- MPI_Aint target_disp){
+int Win::compare_and_swap(const void* origin_addr, const void* compare_addr, void* result_addr, MPI_Datatype datatype,
+ int target_rank, MPI_Aint target_disp)
+{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
return MPI_SUCCESS;
}
-int Win::start(MPI_Group group, int assert){
+int Win::start(MPI_Group group, int /*assert*/)
+{
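+ // The assert argument only carries optimization hints and is ignored here, hence the unnamed parameter.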
/* From MPI Forum advice:
The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
return MPI_SUCCESS;
}
-int Win::post(MPI_Group group, int assert){
+int Win::post(MPI_Group group, int /*assert*/)
+{
//let's make a synchronous send here
int i = 0;
int j = 0;
return MPI_SUCCESS;
}
-int Win::lock(int lock_type, int rank, int assert){
+int Win::lock(int lock_type, int rank, int /*assert*/)
+{
MPI_Win target_win = connected_wins_[rank];
if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
{
- MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+ const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
for (int i = 0; not target_win && i < comm_->size(); i++) {
if (connected_wins_[i]->size_ > 0)
target_win = connected_wins_[i];
*static_cast<void**>(baseptr) = target_win->base_;
} else {
*size = 0;
- *static_cast<void**>(baseptr) = xbt_malloc(0);
+ *static_cast<void**>(baseptr) = nullptr;
}
return MPI_SUCCESS;
}
+
+MPI_Errhandler Win::errhandler()
+{
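+ // Hand out an extra reference so the returned handler stays valid until the caller releases it.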
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
+ return errhandler_;
}
-}
+
+void Win::set_errhandler(MPI_Errhandler errhandler)
+{
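+ // Swap handlers: drop our reference on the old one and take one on the new.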
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
+ errhandler_ = errhandler;
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
+}
+} // namespace smpi
+} // namespace simgrid