* under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.h"
-#include "xbt/time.h"
+#include "xbt/virtu.h"
#include "mc/mc.h"
#include "xbt/replay.h"
#include <errno.h>
+#include "simix/smx_private.h"
#include "surf/surf.h"
s_smpi_subtype_t *subtype = datatype->substruct;
if(datatype->has_subtype == 1){
- // This part handles the problem of non-contignous memory
+ // This part handles the problem of non-contiguous memory
old_buf = buf;
buf = malloc(count*smpi_datatype_size(datatype));
if (flags & SEND) {
}
request->buf = buf;
- // This part handles the problem of non-contignous memory (for the
+ // This part handles the problem of non-contiguous memory (for the
// unserialisation at the reception)
request->old_buf = old_buf;
request->old_type = datatype;
"Cannot (re)start a non-finished communication");
if(request->flags & RECV) {
print_request("New recv", request);
- if (request->size < xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres"))
+ if (request->size < surf_cfg_get_int("smpi/async_small_thres"))
mailbox = smpi_process_mailbox_small();
else
mailbox = smpi_process_mailbox();
/* return;*/
/* }*/
print_request("New send", request);
- if (request->size < xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres")) { // eager mode
+ if (request->size < surf_cfg_get_int("smpi/async_small_thres")) { // eager mode
mailbox = smpi_process_remote_mailbox_small(receiver);
}else{
XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
if(request->old_type->has_subtype == 0){
oldbuf = request->buf;
request->detached = 1;
- request->buf = malloc(request->size);
- if (oldbuf)
+ if (oldbuf){
+ request->buf = malloc(request->size);
memcpy(request->buf,oldbuf,request->size);
+ }
}
XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
}
{
MPI_Request req = *request;
// if we have a sender, we should use its data, and not the data from the receive
+ //FIXME : may fail if req->action has already been freed, the pointer being invalid
if((req->action)&&
(req->src==MPI_ANY_SOURCE || req->tag== MPI_ANY_TAG))
req = (MPI_Request)SIMIX_comm_get_src_data((*request)->action);
if(status != MPI_STATUS_IGNORE) {
status->MPI_SOURCE = req->src;
status->MPI_TAG = req->tag;
+ //if((*request)->action && ((MPI_Request)SIMIX_comm_get_src_data((*request)->action))->size == (*request)->size)
status->MPI_ERROR = MPI_SUCCESS;
+ //else status->MPI_ERROR = MPI_ERR_TRUNCATE;
+ // this handles the case where size in receive differs from size in send
// FIXME: really this should just contain the count of receive-type blocks,
// right?
status->count = req->size;
print_request("New iprobe", request);
// We have to test both mailboxes as we don't know if we will receive one one or another
- if (xbt_cfg_get_int(_surf_cfg_set, "smpi/async_small_thres")>0){
+ if (surf_cfg_get_int("smpi/async_small_thres")>0){
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("trying to probe the perm recv mailbox");
request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
if(request->action){
MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
- *flag=true;
+ *flag = 1;
if(status != MPI_STATUS_IGNORE) {
status->MPI_SOURCE = req->src;
status->MPI_TAG = req->tag;
status->count = req->size;
}
}
- else *flag=false;
+ else *flag = 0;
smpi_mpi_request_free(&request);
return;
return index;
}
-void smpi_mpi_waitall(int count, MPI_Request requests[],
+int smpi_mpi_waitall(int count, MPI_Request requests[],
MPI_Status status[])
{
int index, c;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
+ int retvalue=MPI_SUCCESS;
//tag invalid requests in the set
for(c = 0; c < count; c++) {
if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
}
for(c = 0; c < count; c++) {
- if(MC_IS_ENABLED) {
+ if(MC_is_active()) {
smpi_mpi_wait(&requests[c], pstat);
index = c;
} else {
}
if(status != MPI_STATUSES_IGNORE) {
memcpy(&status[index], pstat, sizeof(*pstat));
+ if(status[index].MPI_ERROR==MPI_ERR_TRUNCATE)retvalue=MPI_ERR_IN_STATUS;
}
}
}
+ return retvalue;
}
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,