namespace simgrid{
namespace smpi{
-Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
- : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
+Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
+ : buf_(const_cast<void*>(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
{
void *old_buf = nullptr;
// FIXME Handle the case of a partial shared malloc.
if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
// This part handles the problem of non-contiguous memory
- old_buf = buf;
+ old_buf = const_cast<void*>(buf);
if (count==0){
buf_ = nullptr;
}else {
if((*request)->refcount_==0){
if ((*request)->flags_ & MPI_REQ_GENERALIZED){
((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
+ delete (*request)->generalized_funcs;
}else{
Comm::unref((*request)->comm_);
Datatype::unref((*request)->old_type_);
{
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
+ XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d, id %d against %d",ref->src_,req->src_, ref->tag_, req->tag_,ref->comm_->id(),req->comm_->id());
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
- if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
+ if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
+ && ((ref->src_ == MPI_ANY_SOURCE && (ref->comm_->group()->rank(req->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
&& ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
//we match, we can transfer some values
if(ref->src_ == MPI_ANY_SOURCE)
{
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
+ XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d, id %d against %d",ref->src_,req->src_, ref->tag_, req->tag_,ref->comm_->id(),req->comm_->id());
xbt_assert(ref, "Cannot match send against null reference");
xbt_assert(req, "Cannot match send against null request");
- if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
+ if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
+ && ((req->src_ == MPI_ANY_SOURCE && (req->comm_->group()->rank(ref->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
&& ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0)|| req->tag_ == ref->tag_)){
if(req->src_ == MPI_ANY_SOURCE)
req->real_src_ = ref->src_;
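
The two matchers above now require the communicator ids to agree (or be MPI_UNDEFINED) before source and tag are compared, and an MPI_ANY_SOURCE receive only matches senders that belong to the receive's communicator group. A standalone restatement of the predicate, for illustration only (the function and parameter names here are hypothetical, not part of SMPI's API):

#include <mpi.h>

// Hypothetical restatement of the matching rule used by match_recv above.
static bool request_matches(int ref_comm_id, int req_comm_id,
                            int ref_src, int req_src, bool src_in_group,
                            int ref_tag, int req_tag)
{
  bool comm_ok = ref_comm_id == MPI_UNDEFINED || req_comm_id == MPI_UNDEFINED ||
                 ref_comm_id == req_comm_id;
  bool src_ok  = (ref_src == MPI_ANY_SOURCE && src_in_group) || req_src == ref_src;
  bool tag_ok  = (ref_tag == MPI_ANY_TAG && req_tag >= 0) || req_tag == ref_tag;
  return comm_ok && src_ok && tag_ok;
}
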
/* factories, to hide the internal flags from the caller */
-MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
comm->group()->actor(dst)->get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
+MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Op op)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
-MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
return request;
}
-MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
request = nullptr;
}
-void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
+void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
MPI_Comm comm, MPI_Status * status)
{
Status::empty(status);
*flag = 1;
if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
- if ((*request)->action_ != nullptr){
+ if ((*request)->action_ != nullptr && (*request)->cancelled_ != 1){
try{
*flag = simcall_comm_test((*request)->action_);
- }catch (xbt_ex& e) {
+ } catch (const Exception&) {
*flag = 0;
return ret;
}
*count = 0;
for (int i = 0; i < incount; i++) {
- if (requests[i] != MPI_REQUEST_NULL) {
+ if (requests[i] != MPI_REQUEST_NULL && not (requests[i]->flags_ & MPI_REQ_FINISHED)) {
ret = test(&requests[i], pstat, &flag);
if(ret!=MPI_SUCCESS)
error = 1;
if(flag) {
- indices[i] = 1;
- (*count)++;
+ indices[*count] = i;
if (status != MPI_STATUSES_IGNORE)
- status[i] = *pstat;
+ status[*count] = *pstat;
+ (*count)++;
if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[i] = MPI_REQUEST_NULL;
}
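
This corrects testsome to the MPI-standard output convention: `indices` is a packed list of the positions of completed requests and `*count` is their number, rather than a per-request 0/1 flag array; requests already marked MPI_REQ_FINISHED are skipped. A hypothetical caller consuming the corrected output, assuming a standard MPI environment:

#include <mpi.h>
#include <cstdio>
#include <vector>

// Sketch of a caller draining whatever has completed so far.
void drain_completions(int n, MPI_Request* requests)
{
  std::vector<int> indices(n);
  std::vector<MPI_Status> statuses(n);
  int outcount = 0;
  MPI_Testsome(n, requests, &outcount, indices.data(), statuses.data());
  for (int k = 0; k < outcount; k++) {
    // indices[k] is the position in requests[] of the k-th completed request
    std::printf("request %d completed (source=%d, tag=%d)\n",
                indices[k], statuses[k].MPI_SOURCE, statuses[k].MPI_TAG);
  }
}
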
simcall_process_sleep(nsleeps*smpi_test_sleep);
try{
i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
- }catch (xbt_ex& e) {
+ } catch (const Exception&) {
+ XBT_DEBUG("Exception in testany");
return 0;
}
if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[*index] = MPI_REQUEST_NULL;
+ XBT_DEBUG("Testany - returning with index %d", *index);
*flag=1;
}
nsleeps = 1;
nsleeps++;
}
} else {
+ XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
//all requests are null or inactive, return true
*flag = 1;
+ *index = MPI_UNDEFINED;
Status::empty(status);
}
nsleeps++;
}
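
With the branch above, testany over a set where every handle is null or inactive now reports completion with `*index = MPI_UNDEFINED` and an empty status, as the MPI standard requires for MPI_Testany. A caller can rely on that to stop polling; a minimal sketch under standard MPI (the loop and function name are hypothetical):

#include <mpi.h>

// Hypothetical polling loop that exits once no active request remains.
void poll_until_drained(int n, MPI_Request* requests)
{
  for (;;) {
    int flag  = 0;
    int index = MPI_UNDEFINED;
    MPI_Testany(n, requests, &index, &flag, MPI_STATUS_IGNORE);
    if (flag && index == MPI_UNDEFINED)
      break; // all handles null/inactive: nothing left to test
    // flag && index != MPI_UNDEFINED means requests[index] just completed
  }
}
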
unref(&request);
+ xbt_assert(request == MPI_REQUEST_NULL);
}
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
return;
}
- if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0)) && ((req->flags_ & MPI_REQ_PREPARED) == 0) &&
- ((req->flags_ & MPI_REQ_GENERALIZED) == 0)) {
+ if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
if(status != MPI_STATUS_IGNORE) {
int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
status->MPI_SOURCE = req->comm_->group()->rank(src);
// this handles the case where the size in the receive differs from the size in the send
status->count = req->real_size_;
}
+ //detached send will be finished at the other end
+ if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
+ req->print_request("Finishing");
+ MPI_Datatype datatype = req->old_type_;
+
+ // FIXME Handle the case of a partial shared malloc.
+ if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
+ (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
+
+ if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
+ static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
+ static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
+ XBT_VERB("Privatization: unserializing to a zone in global memory, switching data segment");
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ }
- req->print_request("Finishing");
- MPI_Datatype datatype = req->old_type_;
-
-// FIXME Handle the case of a partial shared malloc.
- if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
- (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
-
- if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
- static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
- static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
- XBT_VERB("Privatization: unserializing to a zone in global memory, switching data segment");
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
- }
-
- if(datatype->flags() & DT_FLAG_DERIVED){
- // This part handles the problem of non-contiguous memory: unserialization at reception
- if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
- datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
- xbt_free(req->buf_);
- } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
- if (datatype->size() != 0) {
- int n = req->real_size_ / datatype->size();
- req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ if(datatype->flags() & DT_FLAG_DERIVED){
+ // This part handles the problem of non-contiguous memory: unserialization at reception
+ if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
+ datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
+ xbt_free(req->buf_);
+ } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
+ if (datatype->size() != 0) {
+ int n = req->real_size_ / datatype->size();
+ req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ }
+ xbt_free(req->buf_);
}
- xbt_free(req->buf_);
}
}
}
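
The restructuring above fills the status even for detached sends, while the buffer post-processing still applies only to non-detached completions: for derived (non-contiguous) datatypes, the received bytes sit in the contiguous staging buffer `req->buf_` and are unserialized back into the user's `old_buf_`. The staging idea, illustrated at the MPI level with MPI_Pack/MPI_Unpack (an analogy under standard MPI, not SMPI's internal code path; the function name is hypothetical):

#include <mpi.h>
#include <vector>

// Receive one strided column of a rows x cols row-major matrix through a
// contiguous staging buffer, assuming the sender used MPI_Pack with the
// same type map.
void recv_column(double* matrix, int rows, int cols, int src, int tag, MPI_Comm comm)
{
  MPI_Datatype column;
  MPI_Type_vector(rows, 1, cols, MPI_DOUBLE, &column);
  MPI_Type_commit(&column);

  int packed_size = 0;
  MPI_Pack_size(1, column, comm, &packed_size);
  std::vector<char> staging(packed_size); // plays the role of req->buf_

  MPI_Recv(staging.data(), packed_size, MPI_PACKED, src, tag, comm, MPI_STATUS_IGNORE);

  int position = 0; // plays the role of datatype->unserialize(...)
  MPI_Unpack(staging.data(), packed_size, &position, matrix, 1, column, comm);
  MPI_Type_free(&column);
}
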
int count=(*request)->size_/ (*request)->old_type_->size();
(*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
}
- smpi_free_tmp_buffer(buf);
+ smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
}
}
if((*request)->nbc_requests_[i]!=MPI_REQUEST_NULL)
try{
// this is not a detached send
simcall_comm_wait((*request)->action_, -1.0);
- }catch (xbt_ex& e) {
+ } catch (const Exception&) {
XBT_VERB("Request cancelled");
}
}
try{
// this is not a detached send
i = simcall_comm_waitany(comms.data(), comms.size(), -1);
- }catch (xbt_ex& e) {
- XBT_INFO("request %d cancelled ",i);
+ } catch (const Exception&) {
+ XBT_INFO("request %d cancelled", i);
return i;
}
int index = 0;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
-
index = waitany(incount, (MPI_Request*)requests, pstat);
if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
if(status != MPI_STATUSES_IGNORE) {
indices[count] = index;
count++;
for (int i = 0; i < incount; i++) {
- if (requests[i] != MPI_REQUEST_NULL) {
+ if (i!=index && requests[i] != MPI_REQUEST_NULL
+ && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
test(&requests[i], pstat,&flag);
if (flag==1){
indices[count] = i;
(*request)->flags_ |= MPI_REQ_GENERALIZED;
(*request)->flags_ |= MPI_REQ_PERSISTENT;
(*request)->refcount_ = 1;
- ((*request)->generalized_funcs)=xbt_new0(s_smpi_mpi_generalized_request_funcs_t ,1);
+ ((*request)->generalized_funcs) = new s_smpi_mpi_generalized_request_funcs_t;
((*request)->generalized_funcs)->query_fn=query_fn;
((*request)->generalized_funcs)->free_fn=free_fn;
((*request)->generalized_funcs)->cancel_fn=cancel_fn;
}
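
`generalized_funcs` is now allocated with `new` so the `delete` added in unref (first hunk) matches; the struct stores the three user callbacks registered through MPI_Grequest_start. A minimal user-side sketch of that API, with hypothetical callbacks:

#include <mpi.h>

// Hypothetical callbacks for a generalized request.
static int my_query(void* /*extra_state*/, MPI_Status* status)
{
  MPI_Status_set_elements(status, MPI_BYTE, 0);
  MPI_Status_set_cancelled(status, 0);
  status->MPI_SOURCE = MPI_UNDEFINED;
  status->MPI_TAG    = MPI_UNDEFINED;
  return MPI_SUCCESS;
}
static int my_free(void* /*extra_state*/) { return MPI_SUCCESS; }
static int my_cancel(void* /*extra_state*/, int /*complete*/) { return MPI_SUCCESS; }

void demo()
{
  MPI_Request req;
  MPI_Grequest_start(my_query, my_free, my_cancel, /*extra_state=*/nullptr, &req);
  // ... perform the operation elsewhere, then mark it done:
  MPI_Grequest_complete(req);
  MPI_Wait(&req, MPI_STATUS_IGNORE); // query/free callbacks fire here
}
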
void Request::set_nbc_requests(MPI_Request* reqs, int size){
- nbc_requests_=reqs;
- nbc_requests_size_=size;
+ nbc_requests_size_ = size;
+ if (size > 0) {
+ nbc_requests_ = reqs;
+ } else {
+ delete[] reqs;
+ nbc_requests_ = nullptr;
+ }
}
int Request::get_nbc_requests_size(){