Also allow calls to MPI_Barrier to finalize pending RMA calls.
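
For context, a minimal sketch of the pattern this enables: a passive-target
epoch (MPI_Win_lock / MPI_Put / MPI_Win_unlock) whose pending operations are
then flushed by MPI_Barrier. The buffer names and the two-rank layout are
illustrative only, not taken from the test suite:

    #include <mpi.h>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int local = rank; /* illustrative buffer exposed through the window */
      MPI_Win win;
      MPI_Win_create(&local, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

      if (rank == 0) {
        /* passive-target epoch, now supported by SMPI */
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win);
        int value = 42;
        MPI_Put(&value, 1, MPI_INT, 1, 0, 1, MPI_INT, win);
        MPI_Win_unlock(1, win);
      }

      MPI_Barrier(MPI_COMM_WORLD); /* also finalizes any pending RMA calls */

      MPI_Win_free(&win);
      MPI_Finalize();
      return 0;
    }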
}
+void Comm::add_rma_win(MPI_Win win){
+ rma_wins_.push_back(win);
+}
+
+void Comm::remove_rma_win(MPI_Win win){
+ rma_wins_.remove(win);
+}
+
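+// Called from MPI_Barrier: complete the pending RMA operations of every
+// window that this process owns on this communicator.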
+void Comm::finish_rma_calls(){
+ for(auto it : rma_wins_){
+ if(it->rank()==this->rank()){ // only flush our own windows (the list is shared, e.g. for MPI_COMM_WORLD, and holds every rank's windows)
+ int finished = it->finish_comms();
+ XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls",this->rank(), finished);
+ }
+ }
+}
+
+
}
}
#define SMPI_COMM_HPP_INCLUDED
#include "private.h"
-
+#include <list>
namespace simgrid{
namespace smpi{
int* non_uniform_map_; //set if smp nodes have a different number of processes allocated
int is_blocked_;// are ranks allocated on the same smp node contiguous ?
+ std::list<MPI_Win> rma_wins_; // attached windows for synchronization.
+
public:
static std::unordered_map<int, smpi_key_elem> keyvals_;
static int keyval_id_;
static int keyval_free(int* keyval);
static void keyval_cleanup();
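+ // Track the RMA windows attached to this communicator so that barriers can flush their pending operations.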
+ void add_rma_win(MPI_Win win);
+ void remove_rma_win(MPI_Win win);
+ void finish_rma_calls();
+
};
}
WRAPPED_PMPI_CALL(int,MPI_Win_set_name,(MPI_Win win, char * name),(win, name))
WRAPPED_PMPI_CALL(int,MPI_Win_start,(MPI_Group group, int assert, MPI_Win win),(group, assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_wait,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL(int,MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win) ,(lock_type, rank, assert, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_unlock,(int rank, MPI_Win win),(rank, win))
WRAPPED_PMPI_CALL(int,MPI_Win_get_attr, (MPI_Win type, int type_keyval, void *attribute_val, int* flag), (type, type_keyval, attribute_val, flag))
WRAPPED_PMPI_CALL(int,MPI_Win_set_attr, (MPI_Win type, int type_keyval, void *att), (type, type_keyval, att))
WRAPPED_PMPI_CALL(int,MPI_Win_delete_attr, (MPI_Win type, int comm_keyval), (type, comm_keyval))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Type_match_size,(int typeclass,int size,MPI_Datatype *datatype),(typeclass,size,datatype))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Unpack_external,(char *datarep, void *inbuf, MPI_Aint insize, MPI_Aint *position, void *outbuf, int outcount, MPI_Datatype datatype),( datarep, inbuf, insize, position, outbuf, outcount, datatype))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Unpublish_name,( char *service_name, MPI_Info info, char *port_name),( service_name, info, port_name))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win) ,(lock_type, rank, assert, win))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Win_set_errhandler,(MPI_Win win, MPI_Errhandler errhandler) ,(win, errhandler))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Win_test,(MPI_Win win, int *flag),(win, flag))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Win_unlock,(int rank, MPI_Win win),(rank, win))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_Errhandler, MPI_Errhandler_f2c,(MPI_Fint errhandler),(errhandler))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_Fint, MPI_Errhandler_c2f,(MPI_Errhandler errhandler),(errhandler))
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
Colls::barrier(comm);
+
+ // A barrier can be used to synchronize RMA calls, so finish all pending requests attached to this communicator before returning.
+ comm->finish_rma_calls();
+
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
return retval;
}
+int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (lock_type != MPI_LOCK_EXCLUSIVE &&
+ lock_type != MPI_LOCK_SHARED) {
+ retval = MPI_ERR_LOCKTYPE;
+ } else if (rank == MPI_PROC_NULL){
+ retval = MPI_SUCCESS;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->lock(lock_type,rank,assert);
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_unlock(int rank, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (rank == MPI_PROC_NULL){
+ retval = MPI_SUCCESS;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->unlock(rank);
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
void *ptr = xbt_malloc(size);
if(ptr==nullptr)
finish_wait(&requests[index],status);
if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
requests[index] = MPI_REQUEST_NULL;
- }else{
- XBT_WARN("huu?");
}
}
}
Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm){
int comm_size = comm->size();
- int rank = comm->rank();
+ rank_ = comm->rank();
XBT_DEBUG("Creating window");
if(info!=MPI_INFO_NULL)
info->ref();
group_ = MPI_GROUP_NULL;
requests_ = new std::vector<MPI_Request>();
mut_=xbt_mutex_init();
+ lock_mut_=xbt_mutex_init();
connected_wins_ = new MPI_Win[comm_size];
- connected_wins_[rank] = this;
+ connected_wins_[rank_] = this;
count_ = 0;
- if(rank==0){
+ if(rank_==0){
bar_ = MSG_barrier_init(comm_size);
}
- Colls::allgather(&(connected_wins_[rank]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
+
+ comm->add_rma_win(this);
+
+ Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
MPI_BYTE, comm);
Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
Win::~Win(){
//As per the standard, perform a barrier to ensure every async comm is finished
MSG_barrier_wait(bar_);
- xbt_mutex_acquire(mut_);
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
+
delete requests_;
- xbt_mutex_release(mut_);
delete[] connected_wins_;
if (name_ != nullptr){
xbt_free(name_);
MPI_Info_free(&info_);
}
+ comm_->remove_rma_win(this);
+
Colls::barrier(comm_);
int rank=comm_->rank();
if(rank == 0)
MSG_barrier_destroy(bar_);
xbt_mutex_destroy(mut_);
+ xbt_mutex_destroy(lock_mut_);
cleanup_attr<Win>();
}
}
}
+int Win::rank(){
+ return rank_;
+}
+
MPI_Aint Win::size(){
return size_;
}
int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
{
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
//get receiver pointer
MPI_Win recv_win = connected_wins_[target_rank];
+ if(opened_==0){//check that post/start has been done
+ // no fence or post/start epoch is active; the access is only legal if we hold a lock on the target window
+ int locked=0;
+ for(auto it : recv_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
{
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
//get sender pointer
MPI_Win send_win = connected_wins_[target_rank];
+ if(opened_==0){//check that post/start has been done
+ // no fence or post/start epoch is active; the access is only legal if we hold a lock on the target window
+ int locked=0;
+ for(auto it : send_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
if(target_count*target_datatype->get_extent()>send_win->size_)
return MPI_ERR_ARG;
int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op)
{
- if(opened_==0)//check that post/start has been done
- return MPI_ERR_WIN;
- //FIXME: local version
+
//get receiver pointer
MPI_Win recv_win = connected_wins_[target_rank];
+ if(opened_==0){//check that post/start has been done
+ // no fence or post/start epoch is active; the access is only legal if we hold a lock on the target window
+ int locked=0;
+ for(auto it : recv_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+ //FIXME: local version
+
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
}
xbt_free(reqs);
- //now we can finish RMA calls
- xbt_mutex_acquire(mut_);
- std::vector<MPI_Request> *reqqs = requests_;
- size = static_cast<int>(reqqs->size());
-
- XBT_DEBUG("Win_complete - Finishing %d RMA calls", size);
- if (size > 0) {
- // start all requests that have been prepared by another process
- for (const auto& req : *reqqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
- MPI_Request* treqs = &(*reqqs)[0];
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- reqqs->clear();
- }
- xbt_mutex_release(mut_);
+ int finished = finish_comms();
+ XBT_DEBUG("Win_complete - Finished %d RMA calls", finished);
Group::unref(group_);
opened_--; //we're closed for business !
Request::unref(&reqs[i]);
}
xbt_free(reqs);
- xbt_mutex_acquire(mut_);
- std::vector<MPI_Request> *reqqs = requests_;
- size = static_cast<int>(reqqs->size());
+ int finished = finish_comms();
+ XBT_DEBUG("Win_wait - Finished %d RMA calls", finished);
+
+ Group::unref(group_);
+ opened_--; //we're closed for business !
+ return MPI_SUCCESS;
+}
+
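+// Exclusive locks serialize on the target window's lock_mut_; shared locks
+// only register the caller in the target's lockers_ list.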
+int Win::lock(int lock_type, int rank, int assert){
+ MPI_Win target_win = connected_wins_[rank];
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win_lock - Finished %d RMA calls", finished);
+
+ // if the window is already locked exclusively, we have to wait for lock_mut_
+ if (lock_type == MPI_LOCK_EXCLUSIVE)
+ xbt_mutex_acquire(target_win->lock_mut_);
+
+ xbt_mutex_acquire(target_win->mut_);
+ target_win->lockers_.push_back(comm_->rank());
+ xbt_mutex_release(target_win->mut_);
- XBT_DEBUG("Win_wait - Finishing %d RMA calls", size);
+ return MPI_SUCCESS;
+}
+
+int Win::unlock(int rank){
+ MPI_Win target_win = connected_wins_[rank];
+
+ int finished = finish_comms();
+ XBT_DEBUG("Win_unlock - Finished %d RMA calls", finished);
+
+ xbt_mutex_acquire(target_win->mut_);
+ target_win->lockers_.remove(comm_->rank());
+ xbt_mutex_release(target_win->mut_);
+
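+ // lock_mut_ may or may not be held here (exclusive vs. shared lock), so try to
+ // take it without blocking, then release it to wake a waiting exclusive locker.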
+ xbt_mutex_try_acquire(target_win->lock_mut_);
+ xbt_mutex_release(target_win->lock_mut_);
+ return MPI_SUCCESS;
+}
+
+Win* Win::f2c(int id){
+ return static_cast<Win*>(F2C::f2c(id));
+}
+
+
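+// Complete every pending RMA request attached to this window and return the
+// number of finished calls.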
+int Win::finish_comms(){
+ //Finish own requests
+ std::vector<MPI_Request> *reqqs = requests_;
+ int size = static_cast<int>(reqqs->size());
if (size > 0) {
+ xbt_mutex_acquire(mut_);
// start all requests that have been prepared by another process
for (const auto& req : *reqqs) {
      if (req && (req->flags() & PREPARED))
        req->start();
    }

MPI_Request* treqs = &(*reqqs)[0];
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
reqqs->clear();
+ xbt_mutex_release(mut_);
}
- xbt_mutex_release(mut_);
- Group::unref(group_);
- opened_--; //we're opened for business !
- return MPI_SUCCESS;
+ return size;
}
-Win* Win::f2c(int id){
- return static_cast<Win*>(F2C::f2c(id));
-}
}
}
#include "private.h"
#include <vector>
+#include <list>
namespace simgrid{
namespace smpi{
int opened_;
MPI_Group group_;
int count_; //for ordering the accs
+ xbt_mutex_t lock_mut_;
+ std::list<int> lockers_;
+ int rank_; // our rank in comm_, used to identify the window's owner when flushing at barriers (e.g. in MPI_COMM_WORLD)
public:
static std::unordered_map<int, smpi_key_elem> keyvals_;
void get_name( char* name, int* length);
void get_group( MPI_Group* group);
void set_name( char* name);
+ int rank();
int start(MPI_Group group, int assert);
int post(MPI_Group group, int assert);
int complete();
int accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op);
static Win* f2c(int id);
+ int lock(int lock_type, int rank, int assert);
+ int unlock(int rank);
+ int finish_comms();
};