-/* Copyright (c) 2007-2015. The SimGrid Team.
+/* Copyright (c) 2007-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
std::unordered_map<int, smpi_key_elem> Win::keyvals_;
int Win::keyval_id_=0;
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm){
+Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
int comm_size = comm->size();
rank_ = comm->rank();
XBT_DEBUG("Creating window");
xbt_mutex_destroy(mut_);
xbt_mutex_destroy(lock_mut_);
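+  // free the buffer only if this window allocated it (the MPI_Win_allocate path)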
+  if (allocated_ != 0)
+    xbt_free(base_);
+
cleanup_attr<Win>();
}
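+// Attach a memory region to a dynamic window (MPI_Win_attach). Only the total
+// attached size is tracked here; RMA calls address the region through absolute
+// displacements. Usage sketch, assuming a window from MPI_Win_create_dynamic:
+//   MPI_Win_attach(win, buf, len);
+//   ... RMA calls targeting buf ...
+//   MPI_Win_detach(win, buf);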
+int Win::attach(void* base, MPI_Aint size){
+  if (!(base_ == MPI_BOTTOM || base_ == 0))
+    return MPI_ERR_ARG;
+  base_ = 0; // the region's actual address is passed as the displacement in subsequent RMA calls
+  size_ += size;
+  return MPI_SUCCESS;
+}
+
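+// Detach a region from a dynamic window (MPI_Win_detach): the window base and
+// size are simply reset rather than tracking individual attached regions.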
+int Win::detach (void *base){
+ base_=MPI_BOTTOM;
+ size_=-1;
+ return MPI_SUCCESS;
+}
+
void Win::get_name(char* name, int* length){
if(name_==nullptr){
*length=0;
}
}
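+// Return the info object attached to the window, creating an empty one on
+// first use; the caller gets its own reference (MPI_Win_get_info semantics).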
+MPI_Info Win::info(){
+ if(info_== MPI_INFO_NULL)
+ info_ = new Info();
+ info_->ref();
+ return info_;
+}
+
int Win::rank(){
return rank_;
}
return disp_unit_;
}
+int Win::dynamic(){
+ return dynamic_;
+}
+
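+// Replace the window's info object, taking a reference on the new one.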
+void Win::set_info(MPI_Info info){
+  if (info != MPI_INFO_NULL) // ref the incoming info, not the one being replaced
+    info->ref();
+  info_ = info;
+}
void Win::set_name(char* name){
name_ = xbt_strdup(name);
int size = static_cast<int>(reqs->size());
// start all requests that have been prepared by another process
if (size > 0) {
- for (const auto& req : *reqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqs)[0];
-
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
}
count_=0;
xbt_mutex_release(mut_);
}
+
+  if (assert == MPI_MODE_NOSUCCEED) // no RMA calls may follow this fence, so mark the window closed
+    opened_ = 0;
assert_ = assert;
MSG_barrier_wait(bar_);
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
+  // start the send immediately; requests are no longer started lazily at the fence
+  sreq->start();
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
-
//push request to sender's win
xbt_mutex_acquire(mut_);
requests_->push_back(sreq);
smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
count_++;
+
+  // start the send immediately
+  sreq->start();
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
-
//push request to sender's win
xbt_mutex_acquire(mut_);
requests_->push_back(sreq);
get(result_addr, result_count, result_datatype, target_rank,
target_disp, target_count, target_datatype);
- accumulate(origin_addr, origin_count, origin_datatype, target_rank,
+  if (op != MPI_NO_OP) // with MPI_NO_OP, get_accumulate degenerates to a plain get
+    accumulate(origin_addr, origin_count, origin_datatype, target_rank,
target_disp, target_count, target_datatype, op);
return MPI_SUCCESS;
return MPI_SUCCESS;
}
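+// MPI_Win_lock_all: implemented as a shared lock on every rank of the window's
+// communicator, propagating an error code if any of the locks fails.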
+int Win::lock_all(int assert){
+  int retval = MPI_SUCCESS;
+  for (int i = 0; i < comm_->size(); i++){
+    int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+    if (ret != MPI_SUCCESS)
+      retval = ret;
+  }
+  return retval;
+}
+
int Win::unlock(int rank){
if(opened_!=0)
return MPI_ERR_WIN;
return MPI_SUCCESS;
}
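+// MPI_Win_unlock_all: unlock every rank in turn, propagating any error.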
+int Win::unlock_all(){
+  int retval = MPI_SUCCESS;
+  for (int i = 0; i < comm_->size(); i++){
+    int ret = this->unlock(i);
+    if (ret != MPI_SUCCESS)
+      retval = ret;
+  }
+  return retval;
+}
+
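+// MPI_Win_flush: complete all pending RMA requests between this process and
+// the target rank, on both the local and the remote window.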
+int Win::flush(int rank){
+ MPI_Win target_win = connected_wins_[rank];
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
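+// MPI_Win_flush_local: only complete the local side of the pending requests
+// involving the target rank.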
+int Win::flush_local(int rank){
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush_local for rank %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
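+// MPI_Win_flush_all: flush pending RMA requests towards every rank of the window.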
+int Win::flush_all(){
+  int finished = finish_comms();
+  XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
+  for (int i = 0; i < comm_->size(); i++){
+    finished = connected_wins_[i]->finish_comms(rank_);
+    XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
+  }
+  return MPI_SUCCESS;
+}
+
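+// MPI_Win_flush_local_all: locally complete every pending request on this window.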
+int Win::flush_local_all(){
+ int finished = finish_comms();
+ XBT_DEBUG("Win_flush_local_all - Finished %d RMA calls", finished);
+ return MPI_SUCCESS;
+}
+
Win* Win::f2c(int id){
return static_cast<Win*>(F2C::f2c(id));
}
std::vector<MPI_Request> *reqqs = requests_;
int size = static_cast<int>(reqqs->size());
if (size > 0) {
- // start all requests that have been prepared by another process
- for (const auto& req : *reqqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqqs)[0];
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
reqqs->clear();
return size;
}
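+// Rank-filtered variant of finish_comms(): wait only on the pending requests
+// whose source or destination matches the given rank, and return their count.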
+int Win::finish_comms(int rank){
+  xbt_mutex_acquire(mut_);
+  // Finish only our own requests that involve the given rank
+  std::vector<MPI_Request>* reqqs = requests_;
+  int size = static_cast<int>(reqqs->size());
+  if (size > 0) {
+    size = 0;
+    // stack-allocated, so nothing leaks when no request matches the rank
+    std::vector<MPI_Request> myreqqs;
+    std::vector<MPI_Request>::iterator iter = reqqs->begin();
+    while (iter != reqqs->end()){
+      if ((*iter)->src() == rank || (*iter)->dst() == rank){
+        myreqqs.push_back(*iter);
+        iter = reqqs->erase(iter);
+        size++;
+      } else {
+        ++iter;
+      }
+    }
+    if (size > 0){
+      MPI_Request* treqs = &myreqqs[0];
+      Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+    }
+  }
+  xbt_mutex_release(mut_);
+  return size;
+}
+
}
}