/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-
+#include "simgrid/s4u/Mutex.hpp"
+#include "simgrid/s4u/ConditionVariable.hpp"
#include "smpi_request.hpp"
#include "mc/mc.h"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
-#include "src/simix/ActorImpl.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "xbt/config.hpp"
+
#include <algorithm>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
namespace simgrid{
namespace smpi{
-Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
- : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
+Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
+ : buf_(const_cast<void*>(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
{
void *old_buf = nullptr;
// FIXME Handle the case of a partial shared malloc.
if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
// This part handles the problem of non-contiguous memory
- old_buf = buf;
+ old_buf = const_cast<void*>(buf);
if (count==0){
buf_ = nullptr;
}else {
size_ = datatype->size() * count;
datatype->ref();
comm_->ref();
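+ // keep the reduction operation alive for accumulate requests; the matching
+ // Op::unref() is performed when the request itself is destroyed in unref()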
+ if(op != MPI_REPLACE && op != MPI_OP_NULL)
+ op_->ref();
action_ = nullptr;
- detached_ = 0;
+ detached_ = false;
detached_sender_ = nullptr;
real_src_ = 0;
- truncated_ = 0;
+ truncated_ = false;
real_size_ = 0;
real_tag_ = 0;
if (flags & MPI_REQ_PERSISTENT)
refcount_ = 1;
else
refcount_ = 0;
- op_ = MPI_REPLACE;
cancelled_ = 0;
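+ // state for generalized requests and non-blocking collectives starts empty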
-}
-
-MPI_Comm Request::comm(){
- return comm_;
-}
-
-int Request::src(){
- return src_;
-}
-
-int Request::dst(){
- return dst_;
-}
-
-int Request::tag(){
- return tag_;
-}
-
-int Request::flags(){
- return flags_;
-}
-
-int Request::detached(){
- return detached_;
-}
-
-MPI_Datatype Request::type(){
- return old_type_;
-}
-
-size_t Request::size(){
- return size_;
-}
-
-size_t Request::real_size(){
- return real_size_;
+ generalized_funcs=nullptr;
+ nbc_requests_=nullptr;
+ nbc_requests_size_=0;
}
void Request::ref(){
refcount_++;
}

void Request::unref(MPI_Request* request)
{
if((*request) != MPI_REQUEST_NULL){
(*request)->refcount_--;
if((*request)->refcount_ < 0)
xbt_die("Whoops, wrong refcount");
if((*request)->refcount_==0){
- Datatype::unref((*request)->old_type_);
+ if ((*request)->flags_ & MPI_REQ_GENERALIZED){
+ ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
+ delete (*request)->generalized_funcs;
+ }else{
Comm::unref((*request)->comm_);
- (*request)->print_request("Destroying");
- delete *request;
- *request = MPI_REQUEST_NULL;
+ Datatype::unref((*request)->old_type_);
+ }
+ if ((*request)->op_!=MPI_REPLACE && (*request)->op_!=MPI_OP_NULL)
+ Op::unref(&(*request)->op_);
+
+ (*request)->print_request("Destroying");
+ delete *request;
+ *request = MPI_REQUEST_NULL;
}else{
(*request)->print_request("Decrementing");
}
{
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
+ XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d, id %d against %d",ref->src_,req->src_, ref->tag_, req->tag_,ref->comm_->id(),req->comm_->id());
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
- if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
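+ // match only within the same communicator (an id of MPI_UNDEFINED means the id
+ // is not yet known and matches anything), and let MPI_ANY_SOURCE accept only
+ // peers that actually belong to the communicator's group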
+ if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
+ && ((ref->src_ == MPI_ANY_SOURCE && (ref->comm_->group()->rank(req->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
&& ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
//we match, we can transfer some values
if(ref->src_ == MPI_ANY_SOURCE)
ref->real_src_ = req->src_;
if(ref->tag_ == MPI_ANY_TAG)
ref->real_tag_ = req->tag_;
if(ref->real_size_ < req->real_size_)
- ref->truncated_ = 1;
- if(req->detached_==1)
+ ref->truncated_ = true;
+ if (req->detached_)
ref->detached_sender_=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
if(req->cancelled_==0)
req->cancelled_=-1;//mark as uncancellable
{
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
+ XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d, id %d against %d",ref->src_,req->src_, ref->tag_, req->tag_,ref->comm_->id(),req->comm_->id());
xbt_assert(ref, "Cannot match send against null reference");
xbt_assert(req, "Cannot match send against null request");
- if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
+ if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
+ && ((req->src_ == MPI_ANY_SOURCE && (req->comm_->group()->rank(ref->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
&& ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0)|| req->tag_ == ref->tag_)){
if(req->src_ == MPI_ANY_SOURCE)
req->real_src_ = ref->src_;
if(req->tag_ == MPI_ANY_TAG)
req->real_tag_ = ref->tag_;
if(req->real_size_ < ref->real_size_)
- req->truncated_ = 1;
- if(ref->detached_==1)
+ req->truncated_ = true;
+ if (ref->detached_)
req->detached_sender_=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
if(req->cancelled_==0)
req->cancelled_=-1;//mark as uncancellable
/* factories, to hide the internal flags from the caller */
-MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
+MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Op op)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
- MPI_REQ_ACCUMULATE);
- request->op_ = op;
+ MPI_REQ_ACCUMULATE, op);
}
return request;
}
}else{
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
- MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE);
- request->op_ = op;
+ MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
}
return request;
}
MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
-MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
return request;
}
-MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
+void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
MPI_Comm comm, MPI_Status * status)
{
void Request::start()
{
- s4u::MailboxPtr mailbox;
+ s4u::Mailbox* mailbox;
xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
flags_ &= ~MPI_REQ_PREPARED;
int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");
- xbt_mutex_t mut = process->mailboxes_mutex();
+ simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
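+ // these mailboxes are shared between requests when small-message async mode or
+ // RMA is enabled, so the accesses below are guarded by the process's mutex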
if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
- xbt_mutex_acquire(mut);
+ mut->lock();
if (async_small_thresh == 0 && (flags_ & MPI_REQ_RMA) == 0) {
mailbox = process->mailbox();
XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
- xbt_mutex_release(mut);
+ mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
int rank = src_;
((flags_ & MPI_REQ_RMA) != 0 ||
static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
void *oldbuf = nullptr;
- detached_ = 1;
+ detached_ = true;
XBT_DEBUG("Send request %p is detached", this);
this->ref();
if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
//if we are giving back the control to the user without waiting for completion, we have to inject timings
double sleeptime = 0.0;
- if (detached_ != 0 || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
+ if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
// isend and send timings may be different
sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");
- xbt_mutex_t mut=process->mailboxes_mutex();
+ simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
- xbt_mutex_acquire(mut);
+ mut->lock();
if (not(async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
mailbox = process->mailbox();
/* FIXME: detached sends are not traceable (action_ == nullptr) */
if (action_ != nullptr) {
- std::string category = TRACE_internal_smpi_get_category();
- simgrid::simix::simcall([this, category] { this->action_->set_category(category); });
+ boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
+ smpi_process()->get_tracing_category());
}
if (async_small_thresh != 0 || ((flags_ & MPI_REQ_RMA) != 0))
- xbt_mutex_release(mut);
+ mut->unlock();
}
}
(boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}
-int Request::test(MPI_Request * request, MPI_Status * status) {
+int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
//assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
// to avoid deadlocks if used as a break condition, such as
// while (MPI_Test(request, flag, status) && flag) dostuff...
// because the time will not normally advance when only calls to MPI_Test are made -> deadlock
// multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
static int nsleeps = 1;
+ int ret = MPI_SUCCESS;
+
+ // Are we testing a request meant for non blocking collectives ?
+ // If so, test all the subrequests.
+ if ((*request)->nbc_requests_size_>0){
+ ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
+ if(*flag){
+ delete[] (*request)->nbc_requests_;
+ (*request)->nbc_requests_size_=0;
+ unref(request);
+ }
+ return ret;
+ }
+
if(smpi_test_sleep > 0)
simcall_process_sleep(nsleeps*smpi_test_sleep);
Status::empty(status);
- int flag = 1;
+ *flag = 1;
if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
- if ((*request)->action_ != nullptr){
+ if ((*request)->action_ != nullptr && (*request)->cancelled_ != 1){
try{
- flag = simcall_comm_test((*request)->action_);
- }catch (xbt_ex& e) {
- return 0;
+ *flag = simcall_comm_test((*request)->action_);
+ } catch (const Exception&) {
+ *flag = 0;
+ return ret;
}
}
- if (flag) {
+ if (*request != MPI_REQUEST_NULL &&
+ ((*request)->flags_ & MPI_REQ_GENERALIZED)
+ && !((*request)->flags_ & MPI_REQ_COMPLETE))
+ *flag=0;
+ if (*flag) {
finish_wait(request,status);
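+ // for a generalized request, the user-supplied query_fn fills in the status;
+ // hand it a scratch status when the caller passed MPI_STATUS_IGNORE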
+ if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
+ MPI_Status* mystatus;
+ if(status==MPI_STATUS_IGNORE){
+ mystatus=new MPI_Status();
+ Status::empty(mystatus);
+ }else{
+ mystatus=status;
+ }
+ ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
+ if(status==MPI_STATUS_IGNORE)
+ delete mystatus;
+ }
nsleeps=1;//reset the number of sleeps we will do next time
if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
*request = MPI_REQUEST_NULL;
} else {
nsleeps++;
}
}
- return flag;
+ return ret;
}
-int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
+int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
- int count = 0;
+ int ret = MPI_SUCCESS;
+ int error=0;
int count_dead = 0;
+ int flag = 0;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
+ *count = 0;
for (int i = 0; i < incount; i++) {
- if (requests[i] != MPI_REQUEST_NULL) {
- if (test(&requests[i], pstat)) {
- indices[i] = 1;
- count++;
+ if (requests[i] != MPI_REQUEST_NULL && not (requests[i]->flags_ & MPI_REQ_FINISHED)) {
+ ret = test(&requests[i], pstat, &flag);
+ if(ret!=MPI_SUCCESS)
+ error = 1;
+ if(flag) {
+ indices[*count] = i;
if (status != MPI_STATUSES_IGNORE)
- status[i] = *pstat;
+ status[*count] = *pstat;
+ (*count)++;
if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[i] = MPI_REQUEST_NULL;
}
} else {
count_dead++;
}
}
- if(count_dead==incount)
- return MPI_UNDEFINED;
- else return count;
+ if(count_dead==incount)*count=MPI_UNDEFINED;
+ if(error!=0)
+ return MPI_ERR_IN_STATUS;
+ else
+ return MPI_SUCCESS;
}
-int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
+int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
- std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
+ std::vector<simgrid::kernel::activity::CommImpl*> comms;
comms.reserve(count);
int i;
- int flag = 0;
-
+ *flag = 0;
+ int ret = MPI_SUCCESS;
*index = MPI_UNDEFINED;
std::vector<int> map; /** Maps all matching comms back to their location in requests **/
for(i = 0; i < count; i++) {
if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
- comms.push_back(requests[i]->action_);
+ comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
map.push_back(i);
}
}
if (not comms.empty()) {
//multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
static int nsleeps = 1;
if(smpi_test_sleep > 0)
simcall_process_sleep(nsleeps*smpi_test_sleep);
try{
i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
- }catch (xbt_ex& e) {
+ } catch (const Exception&) {
+ XBT_DEBUG("Exception in testany");
return 0;
}
if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
*index = map[i];
- finish_wait(&requests[*index],status);
- flag = 1;
- nsleeps = 1;
- if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT)) {
- requests[*index] = MPI_REQUEST_NULL;
+ if (requests[*index] != MPI_REQUEST_NULL &&
+ (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
+ && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
+ *flag=0;
+ } else {
+ finish_wait(&requests[*index],status);
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)){
+ MPI_Status* mystatus;
+ if(status==MPI_STATUS_IGNORE){
+ mystatus=new MPI_Status();
+ Status::empty(mystatus);
+ }else{
+ mystatus=status;
+ }
+ ret=(requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
+ if(status==MPI_STATUS_IGNORE)
+ delete mystatus;
}
+
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
+ requests[*index] = MPI_REQUEST_NULL;
+ XBT_DEBUG("Testany - returning with index %d", *index);
+ *flag=1;
+ }
+ nsleeps = 1;
} else {
nsleeps++;
}
} else {
+ XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
//all requests are null or inactive, return true
- flag = 1;
+ *flag = 1;
+ *index = MPI_UNDEFINED;
Status::empty(status);
}
- return flag;
+ return ret;
}
-int Request::testall(int count, MPI_Request requests[], MPI_Status status[])
+int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- int flag=1;
+ int flag;
+ int error = 0;
+ int ret=MPI_SUCCESS;
+ *outflag = 1;
for(int i=0; i<count; i++){
if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
- if (test(&requests[i], pstat)!=1){
+ ret = test(&requests[i], pstat, &flag);
+ if (flag){
flag=0;
+ requests[i]=MPI_REQUEST_NULL;
}else{
- requests[i]=MPI_REQUEST_NULL;
+ *outflag=0;
}
+ if (ret != MPI_SUCCESS)
+ error = 1;
}else{
Status::empty(pstat);
}
if (status != MPI_STATUSES_IGNORE) {
status[i] = *pstat;
}
}
- return flag;
+ if(error==1)
+ return MPI_ERR_IN_STATUS;
+ else
+ return MPI_SUCCESS;
}
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
->wait();
}
// behave like a receive, but don't do it
- s4u::MailboxPtr mailbox;
+ s4u::Mailbox* mailbox;
request->print_request("New iprobe");
// We have to test both mailboxes as we don't know on which one we will receive
nsleeps++;
}
unref(&request);
+ xbt_assert(request == MPI_REQUEST_NULL);
}
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
return;
}
- if (not((req->detached_ != 0) && ((req->flags_ & MPI_REQ_SEND) != 0)) && ((req->flags_ & MPI_REQ_PREPARED) == 0)) {
+ if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
if(status != MPI_STATUS_IGNORE) {
int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
status->MPI_SOURCE = req->comm_->group()->rank(src);
status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
- status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+ status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
// this handles the case where the size in the receive differs from the size in the send
status->count = req->real_size_;
}
+ //detached send will be finished at the other end
+ if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
+ req->print_request("Finishing");
+ MPI_Datatype datatype = req->old_type_;
+
+ // FIXME Handle the case of a partial shared malloc.
+ if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
+ (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
+
+ if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
+ static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
+ static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
+ XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ }
- req->print_request("Finishing");
- MPI_Datatype datatype = req->old_type_;
-
-// FIXME Handle the case of a partial shared malloc.
- if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
- (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
-
- if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
- static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
- static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
- XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
- }
-
- if(datatype->flags() & DT_FLAG_DERIVED){
- // This part handles the problem of non-contignous memory the unserialization at the reception
- if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
- datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
- xbt_free(req->buf_);
- } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
- if (datatype->size() != 0) {
- int n = req->real_size_ / datatype->size();
- req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ if(datatype->flags() & DT_FLAG_DERIVED){
+ // This part handles the problem of non-contiguous memory: unserialize at reception
+ if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
+ datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
+ xbt_free(req->buf_);
+ } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
+ if (datatype->size() != 0) {
+ int n = req->real_size_ / datatype->size();
+ req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ }
+ xbt_free(req->buf_);
}
- xbt_free(req->buf_);
}
}
}
unref(request);
}
-void Request::wait(MPI_Request * request, MPI_Status * status)
+int Request::wait(MPI_Request * request, MPI_Status * status)
{
+ int ret=MPI_SUCCESS;
+ // Are we waiting on a request meant for non blocking collectives ?
+ // If so, wait for all the subrequests.
+ if ((*request)->nbc_requests_size_>0){
+ ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
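+ // reduce-like collectives received into temporary buffers: apply the pending
+ // operation into the user buffer before freeing each subrequest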
+ for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
+ if((*request)->buf_!=nullptr && (*request)->nbc_requests_[i]!=MPI_REQUEST_NULL){//reduce case
+ void * buf=(*request)->nbc_requests_[i]->buf_;
+ if((*request)->old_type_->flags() & DT_FLAG_DERIVED)
+ buf=(*request)->nbc_requests_[i]->old_buf_;
+ if((*request)->nbc_requests_[i]->flags_ & MPI_REQ_RECV ){
+ if((*request)->op_!=MPI_OP_NULL){
+ int count=(*request)->size_/ (*request)->old_type_->size();
+ (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
+ }
+ smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
+ }
+ }
+ if((*request)->nbc_requests_[i]!=MPI_REQUEST_NULL)
+ Request::unref(&((*request)->nbc_requests_[i]));
+ }
+ delete[] (*request)->nbc_requests_;
+ (*request)->nbc_requests_size_=0;
+ unref(request);
+ (*request)=MPI_REQUEST_NULL;
+ return ret;
+ }
+
(*request)->print_request("Waiting");
if ((*request)->flags_ & MPI_REQ_PREPARED) {
Status::empty(status);
- return;
+ return ret;
}
if ((*request)->action_ != nullptr){
try{
// this is not a detached send
simcall_comm_wait((*request)->action_, -1.0);
- }catch (xbt_ex& e) {
+ } catch (const Exception&) {
XBT_VERB("Request cancelled");
}
}
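+ // a generalized request has no backing activity to wait on: block on its
+ // condition variable until grequest_complete() sets MPI_REQ_COMPLETE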
+ if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
+ MPI_Status* mystatus;
+ if(!((*request)->flags_ & MPI_REQ_COMPLETE)){
+ ((*request)->generalized_funcs)->mutex->lock();
+ ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
+ ((*request)->generalized_funcs)->mutex->unlock();
+ }
+ if(status==MPI_STATUS_IGNORE){
+ mystatus=new MPI_Status();
+ Status::empty(mystatus);
+ }else{
+ mystatus=status;
+ }
+ ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
+ if(status==MPI_STATUS_IGNORE)
+ delete mystatus;
+ }
finish_wait(request,status);
if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
*request = MPI_REQUEST_NULL;
+ return ret;
}
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
- s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
+ std::vector<simgrid::kernel::activity::CommImpl*> comms;
+ comms.reserve(count);
int index = MPI_UNDEFINED;
if(count > 0) {
- int size = 0;
// Wait for a request to complete
- xbt_dynar_init(&comms, sizeof(simgrid::kernel::activity::ActivityImpl*), nullptr);
- int *map = xbt_new(int, count);
+ std::vector<int> map;
XBT_DEBUG("Wait for one of %d", count);
for(int i = 0; i < count; i++) {
if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
if (requests[i]->action_ != nullptr) {
XBT_DEBUG("Waiting any %p ", requests[i]);
- xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
- map[size] = i;
- size++;
+ comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
+ map.push_back(i);
} else {
// This is a finished detached request, let's return this one
- size = 0; // so we free the dynar but don't do the waitany call
comms.clear(); // so we skip the waitany call below
index = i;
finish_wait(&requests[i], status); // cleanup if refcount = 0
if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[i] = MPI_REQUEST_NULL;
}
}
}
- if (size > 0) {
- XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
+ if (not comms.empty()) {
+ XBT_DEBUG("Enter waitany for %zu comms", comms.size());
int i=MPI_UNDEFINED;
try{
// this is not a detached send
- i = simcall_comm_waitany(&comms, -1);
- }catch (xbt_ex& e) {
- XBT_INFO("request %d cancelled ",i);
+ i = simcall_comm_waitany(comms.data(), comms.size(), -1);
+ } catch (const Exception&) {
+ XBT_INFO("request %d cancelled ", i);
return i;
}
}
}
}
-
- xbt_dynar_free_data(&comms);
- xbt_free(map);
}
if (index==MPI_UNDEFINED)
Status::empty(status);

return index;
}

int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
int count = 0;
+ int flag = 0;
+ int index = 0;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
-
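+ // block in waitany() until one request completes, then drain any other
+ // already-completed requests with non-blocking tests below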
+ index = waitany(incount, requests, pstat);
+ if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
+ if(status != MPI_STATUSES_IGNORE) {
+ status[count] = *pstat;
+ }
+ indices[count] = index;
+ count++;
for (int i = 0; i < incount; i++) {
- int index = waitany(incount, requests, pstat);
- if(index!=MPI_UNDEFINED){
- indices[count] = index;
- count++;
- if(status != MPI_STATUSES_IGNORE) {
- status[index] = *pstat;
+ if (i!=index && requests[i] != MPI_REQUEST_NULL
+ && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
+ test(&requests[i], pstat,&flag);
+ if (flag==1){
+ indices[count] = i;
+ if(status != MPI_STATUSES_IGNORE) {
+ status[count] = *pstat;
+ }
+ if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
+ requests[i]=MPI_REQUEST_NULL;
+ count++;
}
- if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
- requests[index] = MPI_REQUEST_NULL;
- }else{
- return MPI_UNDEFINED;
}
}
return count;
}
}
+
+int Request::get_status(MPI_Request req, int* flag, MPI_Status * status){
+ *flag=0;
+
+ if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
+ req->iprobe(req->src_, req->tag_, req->comm_, flag, status);
+ if(*flag)
+ return MPI_SUCCESS;
+ }
+ if (req != MPI_REQUEST_NULL &&
+ (req->flags_ & MPI_REQ_GENERALIZED)
+ && !(req->flags_ & MPI_REQ_COMPLETE)) {
+ *flag=0;
+ return MPI_SUCCESS;
+ }
+
+ *flag=1;
+ if(req != MPI_REQUEST_NULL &&
+ status != MPI_STATUS_IGNORE) {
+ int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
+ status->MPI_SOURCE = req->comm_->group()->rank(src);
+ status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
+ status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+ status->count = req->real_size_;
+ }
+ return MPI_SUCCESS;
+}
+
+int Request::grequest_start( MPI_Grequest_query_function *query_fn, MPI_Grequest_free_function *free_fn, MPI_Grequest_cancel_function *cancel_fn, void *extra_state, MPI_Request *request){
+
+ *request = new Request();
+ (*request)->flags_ |= MPI_REQ_GENERALIZED;
+ (*request)->flags_ |= MPI_REQ_PERSISTENT;
+ (*request)->refcount_ = 1;
+ ((*request)->generalized_funcs) = new s_smpi_mpi_generalized_request_funcs_t;
+ ((*request)->generalized_funcs)->query_fn=query_fn;
+ ((*request)->generalized_funcs)->free_fn=free_fn;
+ ((*request)->generalized_funcs)->cancel_fn=cancel_fn;
+ ((*request)->generalized_funcs)->extra_state=extra_state;
+ ((*request)->generalized_funcs)->cond = simgrid::s4u::ConditionVariable::create();
+ ((*request)->generalized_funcs)->mutex = simgrid::s4u::Mutex::create();
+ return MPI_SUCCESS;
+}
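+ // Illustrative user-side sequence (a sketch; query_fn, free_fn, cancel_fn and
+ // state are placeholders for user callbacks, not symbols from this file):
+ //   MPI_Request req;
+ //   MPI_Grequest_start(query_fn, free_fn, cancel_fn, state, &req);
+ //   ... start some asynchronous work, which eventually calls ...
+ //   MPI_Grequest_complete(req);  // wakes any MPI_Wait blocked on req
+ //   MPI_Wait(&req, &status);     // query_fn fills status; free_fn runs at release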
+
+int Request::grequest_complete( MPI_Request request){
+ if (not(request->flags_ & MPI_REQ_GENERALIZED) || request->generalized_funcs->mutex == nullptr)
+ return MPI_ERR_REQUEST;
+ request->generalized_funcs->mutex->lock();
+ request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
+ request->generalized_funcs->cond->notify_one();
+ request->generalized_funcs->mutex->unlock();
+ return MPI_SUCCESS;
+}
+
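+ // takes ownership of 'reqs': the array is released with delete[] once the
+ // enclosing collective request completes (see test()/wait() above), or
+ // immediately when size <= 0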
+void Request::set_nbc_requests(MPI_Request* reqs, int size){
+ nbc_requests_size_ = size;
+ if (size > 0) {
+ nbc_requests_ = reqs;
+ } else {
+ delete[] reqs;
+ nbc_requests_ = nullptr;
+ }
+}
+
+int Request::get_nbc_requests_size(){
+ return nbc_requests_size_;
+}
+
+MPI_Request* Request::get_nbc_requests(){
+ return nbc_requests_;
+}
+
}
}