void smpi_mpi_wait(MPI_Request * request, MPI_Status * status);
int smpi_mpi_waitany(int count, MPI_Request requests[],
MPI_Status * status);
-void smpi_mpi_waitall(int count, MPI_Request requests[],
+int smpi_mpi_waitall(int count, MPI_Request requests[],
MPI_Status status[]);
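+ // smpi_mpi_waitall returns MPI_SUCCESS, or MPI_ERR_IN_STATUS when a request completed with an error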
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
MPI_Status status[]);
{
MPI_Request req = *request;
// if we have a sender, we should use its data, and not the data from the receive
+ //FIXME: may fail if req->action has already been freed, leaving the pointer invalid
if((req->action)&&
(req->src==MPI_ANY_SOURCE || req->tag== MPI_ANY_TAG))
req = (MPI_Request)SIMIX_comm_get_src_data((*request)->action);
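+ // (with MPI_ANY_SOURCE / MPI_ANY_TAG the real peer and tag are only known from the
+ // sender's request, which is attached to the communication as its src data)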
if(status != MPI_STATUS_IGNORE) {
status->MPI_SOURCE = req->src;
status->MPI_TAG = req->tag;
+ //if((*request)->action && ((MPI_Request)SIMIX_comm_get_src_data((*request)->action))->size == (*request)->size)
status->MPI_ERROR = MPI_SUCCESS;
+ //else status->MPI_ERROR = MPI_ERR_TRUNCATE;
+ // this handles the case where the size in the receive differs from the size in the send
// FIXME: really this should just contain the count of receive-type blocks,
// right?
status->count = req->size;
if(status != MPI_STATUS_IGNORE) {
status->MPI_SOURCE = req->src;
status->MPI_TAG = req->tag;
- status->MPI_ERROR = MPI_SUCCESS;
- status->count = req->size;
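+ // Flag any size mismatch between the matched send and the posted receive as a truncation error.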
+ if(req->size == (*request)->size)
+ status->MPI_ERROR = MPI_SUCCESS;
+ else status->MPI_ERROR = MPI_ERR_TRUNCATE;
+ status->count = (*request)->size;
}
}
else *flag = 0;
return index;
}
-void smpi_mpi_waitall(int count, MPI_Request requests[],
+int smpi_mpi_waitall(int count, MPI_Request requests[],
MPI_Status status[])
{
int index, c;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
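+ // pstat is a scratch status (or MPI_STATUS_IGNORE) filled by each individual wait
+ // and then copied into the caller's status array below.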
+ int retvalue = MPI_SUCCESS;
//tag invalid requests in the set
for(c = 0; c < count; c++) {
if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
}
if(status != MPI_STATUSES_IGNORE) {
memcpy(&status[index], pstat, sizeof(*pstat));
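+ // Per MPI semantics, report MPI_ERR_IN_STATUS when an individual request failed;
+ // the per-request error codes remain available in the status array.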
+ if(status[index].MPI_ERROR == MPI_ERR_TRUNCATE) retvalue = MPI_ERR_IN_STATUS;
}
}
}
+ return retvalue;
}
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
if (i >= n) {
index = smpi_group_index(group, rank);
smpi_group_set_mapping(*newgroup, index, rank);
- rank++;
+
}
+ rank++;
}
}
smpi_group_use(*newgroup);
smpi_group_set_mapping(*newgroup, index, newrank);
}
}
+ newrank++; //added to avoid looping, needs to be checked
}
}
}
MPI_Comm comm, MPI_Status * status)
{
//TODO: suboptimal implementation
- void *recvbuf;
- int retval, size;
+ // void *recvbuf;
+ int retval;
- size = smpi_datatype_size(datatype) * count;
- recvbuf = xbt_new(char, size);
+// size = smpi_datatype_size(datatype) * count;
+// recvbuf = xbt_new(char, size);
retval =
- MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count,
+ MPI_Sendrecv(buf, count, datatype, dst, sendtag, buf, count,
datatype, src, recvtag, comm, status);
- memcpy(buf, recvbuf, size * sizeof(char));
- xbt_free(recvbuf);
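+ // NOTE: the MPI standard forbids aliasing the send and receive buffers of MPI_Sendrecv;
+ // passing buf twice assumes SMPI copies the payload internally, making the temporary
+ // recvbuf presumably unnecessary.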
+/*memcpy(buf, recvbuf, size * sizeof(char));
+ xbt_free(recvbuf);*/
return retval;
}
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
#endif
- smpi_mpi_waitall(count, requests, status);
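+ // Forward the result of the underlying waitall (e.g. MPI_ERR_IN_STATUS) to the caller.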
+ int retval = smpi_mpi_waitall(count, requests, status);
#ifdef HAVE_TRACING
for (i = 0; i < count; i++) {
int src_traced, dst_traced, is_wait_for_receive;
TRACE_smpi_computing_in(rank_traced);
#endif
smpi_bench_begin();
- return MPI_SUCCESS;
+ return retval;
}
int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount,
smpi_bench_end();
strncpy(name, SIMIX_host_get_name(SIMIX_host_self()),
- MPI_MAX_PROCESSOR_NAME - 1);
+ strlen(SIMIX_host_get_name(SIMIX_host_self())) < MPI_MAX_PROCESSOR_NAME - 1 ?
+ strlen(SIMIX_host_get_name(SIMIX_host_self())) + 1 :
+ MPI_MAX_PROCESSOR_NAME - 1);
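+ // Copy the trailing '\0' when the host name fits; on truncation strncpy does not
+ // NUL-terminate and resultlen is capped below.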
*resultlen =
strlen(name) >
MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);