if(info!=MPI_INFO_NULL)
info->ref();
int comm_size = comm->size();
- name_ = nullptr;
opened_ = 0;
group_ = MPI_GROUP_NULL;
requests_ = new std::vector<MPI_Request>();
delete requests_;
delete[] connected_wins_;
- if (name_ != nullptr){
- xbt_free(name_);
- }
if (info_ != MPI_INFO_NULL)
simgrid::smpi::Info::unref(info_);
if (errhandler_ != MPI_ERRHANDLER_NULL)
int Win::attach(void* /*base*/, MPI_Aint size)
{
- if (not(base_ == MPI_BOTTOM || base_ == 0))
+ if (not(base_ == MPI_BOTTOM || base_ == nullptr))
return MPI_ERR_ARG;
- base_=0;//actually the address will be given in the RMA calls, as being the disp.
+ base_ = nullptr; // the actual address will be given in each RMA call, as the displacement (disp)
size_+=size;
return MPI_SUCCESS;
}
return MPI_SUCCESS;
}
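+ // Illustrative only (standard MPI-3 usage, not part of this change): with a dynamic window the
+ // memory is attached by the user and addressed through displacements obtained with
+ // MPI_Get_address, which is why base_ can stay nullptr here:
+ //   MPI_Win_create_dynamic(MPI_INFO_NULL, comm, &win);
+ //   MPI_Win_attach(win, buf, len);
+ //   MPI_Aint disp; MPI_Get_address(buf, &disp);             // disp is what the origin passes
+ //   MPI_Put(src, n, type, target_rank, disp, n, type, win); // as target_disp in RMA calls
+ //   MPI_Win_detach(win, buf);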
-void Win::get_name(char* name, int* length){
- if(name_==nullptr){
- *length=0;
- name=nullptr;
- return;
+void Win::get_name(char* name, int* length) const
+{
+ *length = static_cast<int>(name_.length());
+ if (not name_.empty()) {
+ name_.copy(name, *length);
+ name[*length] = '\0';
}
- *length = strlen(name_);
- strncpy(name, name_, *length+1);
}
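+ // For reference (illustrative caller, not part of this change): MPI_Win_get_name expects a
+ // buffer of at least MPI_MAX_OBJECT_NAME chars, which is what makes the name[*length] = '\0'
+ // write above safe:
+ //   char buf[MPI_MAX_OBJECT_NAME]; int len;
+ //   MPI_Win_get_name(win, buf, &len);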
void Win::get_group(MPI_Group* group){
return info_;
}
-int Win::rank(){
+int Win::rank() const
+{
return rank_;
}
-MPI_Aint Win::size(){
+MPI_Aint Win::size() const
+{
return size_;
}
-void* Win::base(){
+void* Win::base() const
+{
return base_;
}
-int Win::disp_unit(){
+int Win::disp_unit() const
+{
return disp_unit_;
}
-int Win::dynamic(){
+int Win::dynamic() const
+{
return dynamic_;
}
}
void Win::set_name(const char* name){
- name_ = xbt_strdup(name);
+ name_ = name;
}
int Win::fence(int assert)
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
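+ // recv_addr follows MPI's RMA addressing rule: target address = base + target_disp * disp_unit,
+ // using the base_ and disp_unit_ of the target (receiving) window.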
if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
// As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
// SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
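+ // In other words, each RMA operation on this window gets its own tag strictly below
+ // SMPI_RMA_TAG (roughly SMPI_RMA_TAG minus the operation index), which keeps the operations
+ // ordered and out of the tag range used by the other internal SMPI messages.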
int i = 0;
int j = 0;
int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
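+ // A sized vector value-initializes its elements; MPI_Request is a pointer typedef in SMPI, so
+ // every slot starts out as nullptr, just like the zero-filled buffer xbt_new0 returned
+ // (the same holds for the identical request vectors in post/complete/wait below).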
XBT_DEBUG("Entering MPI_Win_Start");
while (j != size) {
j++;
}
size = i;
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for (i = 0; i < size; i++) {
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
opened_++; //we're open for business !
group_=group;
group->ref();
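+ // Reminder: start/complete run on the origin side of MPI's general active target
+ // synchronization and pair with post/wait on the target side; the post, complete and wait
+ // fragments below use the same "collect requests, startall, waitall, unref" pattern.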
int i = 0;
int j = 0;
int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
}
size=i;
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
opened_++; //we're open for business !
group_=group;
group->ref();
int i = 0;
int j = 0;
int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
while(j!=size){
int dst = comm_->group()->rank(group_->actor(j));
}
size=i;
XBT_DEBUG("Win_complete - Sending sync messages to %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
int finished = finish_comms();
XBT_DEBUG("Win_complete - Finished %d RMA calls", finished);
int i = 0;
int j = 0;
int size = group_->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ std::vector<MPI_Request> reqs(size);
while(j!=size){
int src = comm_->group()->rank(group_->actor(j));
}
size=i;
XBT_DEBUG("Win_wait - Receiving sync messages from %d processes", size);
- Request::startall(size, reqs);
- Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
+ Request::startall(size, reqs.data());
+ Request::waitall(size, reqs.data(), MPI_STATUSES_IGNORE);
for(i=0;i<size;i++){
Request::unref(&reqs[i]);
}
- xbt_free(reqs);
int finished = finish_comms();
XBT_DEBUG("Win_wait - Finished %d RMA calls", finished);
}
int Win::lock_all(int assert){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
- int ret = this->lock(MPI_LOCK_SHARED, i, assert);
- if(ret != MPI_SUCCESS)
- retval = ret;
+ for (int i = 0; i < comm_->size(); i++) {
+ int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+ if (ret != MPI_SUCCESS)
+ retval = ret;
}
return retval;
}
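+ // lock_all just takes a shared lock on every rank of the window's communicator, so a user's
+ // MPI_Win_lock_all(assert, win) must be matched by MPI_Win_unlock_all(win); unlock_all()
+ // below undoes it rank by rank in the same fashion.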
}
int Win::unlock_all(){
- int i=0;
int retval = MPI_SUCCESS;
- for (i=0; i<comm_->size();i++){
+ for (int i = 0; i < comm_->size(); i++) {
int ret = this->unlock(i);
if (ret != MPI_SUCCESS)
retval = ret;
if (size > 0) {
size = 0;
std::vector<MPI_Request> myreqqs;
- std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ auto iter = reqqs->begin();
int proc_id = comm_->group()->actor(rank)->get_pid();
while (iter != reqqs->end()){
// Let's see if we're either the destination or the sender of this request
return size;
}
-int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const
{
const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
for (int i = 0; not target_win && i < comm_->size(); i++) {