- if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] && y[i];
- }
- }
-}
-
-/**
- * sum two vectors element-wise
- *
- * @param a the first vector (read-only input)
- * @param b the second vector; on return holds the element-wise sums
- * @param length number of elements in each vector
- * @param datatype selects the element type (byte, int, float or double)
- * @return the second vector is modified and contains the element-wise sums
- **/
-void smpi_mpi_sum_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
-
-/* MPI_SUM reduction callback: accumulates b[i] = a[i] + b[i] for the element
- * type matching *datatype. A datatype that matches none of the four branches
- * is silently ignored and b is left untouched. */
-void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- /* MPI_BYTE is handled as plain char */
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- }
-}
-/**
- * compute the min of two vectors element-wise
- *
- * b[i] becomes min(a[i], b[i]); the result is stored in the second vector.
- * Datatypes other than byte/int/float/double are silently ignored.
- **/
-void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype);
-
-/* MPI_MIN reduction callback. NOTE(review): written as nested if/else blocks
- * rather than the else-if chain used by the sum/max variants — behavior is
- * identical, only the formatting (and the collapsed `}}}}` closer) differs. */
-void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else {
- if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
-
- }}}}
-}
-/**
- * compute the max of two vectors element-wise
- *
- * b[i] becomes max(a[i], b[i]); the result is stored in the second vector.
- * Datatypes other than byte/int/float/double are silently ignored.
- **/
-void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype);
-
-/* MPI_MAX reduction callback, structured like smpi_mpi_sum_func. */
-void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
-
- }
-}
-
-
-
-
-/**
- * tell the MPI rank of the calling process (from its SIMIX process id)
- *
- * @param comm the communicator whose rank mapping is consulted
- * @return the rank of the calling process within comm
- **/
-int smpi_mpi_comm_rank(smpi_mpi_communicator_t comm)
-{
- /* translate the global process index into this communicator's rank */
- return comm->index_to_rank_map[smpi_process_index()];
-}
-
-void smpi_process_init(int *argc, char***argv)
-{
- smpi_process_data_t pdata;
-
- // initialize some local variables
-
- pdata = xbt_new(s_smpi_process_data_t, 1);
- SIMIX_process_set_data(SIMIX_process_self(),pdata);
-
- /* get rank from command line, and remove it from argv */
- pdata->index = atoi( (*argv)[1] );
- DEBUG1("I'm rank %d",pdata->index);
- if (*argc>2) {
- memmove((*argv)[1],(*argv)[2], sizeof(char*)* (*argc-2));
- (*argv)[ (*argc)-1] = NULL;
- }
- (*argc)--;
-
- pdata->mutex = SIMIX_mutex_init();
- pdata->cond = SIMIX_cond_init();
- pdata->finalize = 0;
-
- pdata->pending_recv_request_queue = xbt_fifo_new();
- pdata->pending_send_request_queue = xbt_fifo_new();
- pdata->received_message_queue = xbt_fifo_new();
-
- pdata->main = SIMIX_process_self();
- pdata->sender = SIMIX_process_create("smpi_sender",
- smpi_sender, pdata,
- SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
- /*props */ NULL);
- pdata->receiver = SIMIX_process_create("smpi_receiver",
- smpi_receiver, pdata,
- SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
- /*props */ NULL);
-
- smpi_global->main_processes[pdata->index] = SIMIX_process_self();
+
+ for(i = 0; i < count; i++) {
+ smpi_mpi_start(requests[i]);
+ }
+}
+
+/* Release one reference to *request. The request is destroyed when its
+ * refcount reaches zero, and *request is reset to MPI_REQUEST_NULL so the
+ * caller's handle cannot dangle. Aborts (xbt_die) on a double free
+ * (handle already MPI_REQUEST_NULL) or on a corrupted negative refcount. */
+void smpi_mpi_request_free(MPI_Request * request)
+{
+
+ if((*request) != MPI_REQUEST_NULL){
+ (*request)->refcount--;
+ if((*request)->refcount<0) xbt_die("wrong refcount");
+
+ if((*request)->refcount==0){
+ xbt_free(*request);
+ *request = MPI_REQUEST_NULL;
+ }
+ }else{
+ xbt_die("freeing an already free request");
+ }
+}
+
+/* Build a send request without starting it (caller starts it later, e.g. via
+ * smpi_mpi_startall). NOTE(review): despite the *_init name the request is
+ * flagged NON_PERSISTENT | SEND, exactly like smpi_mpi_isend — confirm this
+ * is the intended flag for an init-style constructor. */
+MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ comm, NON_PERSISTENT | SEND);
+
+ return request;
+}
+
+/* Nonblocking send: build a NON_PERSISTENT send request (source is the
+ * caller's rank in comm) and start it immediately. Caller completes it
+ * with wait/test. */
+MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ comm, NON_PERSISTENT | SEND);
+
+ smpi_mpi_start(request);
+ return request;
+}
+
+/* Build a receive request without starting it (destination is the caller's
+ * rank in comm). NOTE(review): flagged NON_PERSISTENT | RECV despite the
+ * *_init name, mirroring smpi_isend_init — confirm intended. */
+MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
+ comm, NON_PERSISTENT | RECV);
+ return request;
+}
+
+/* Nonblocking receive: build a NON_PERSISTENT receive request and start it
+ * immediately. Caller completes it with wait/test. */
+MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request =
+ build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
+ comm, NON_PERSISTENT | RECV);
+
+ smpi_mpi_start(request);
+ return request;
+}
+
+/* Blocking receive, implemented as irecv + wait. The wait also releases the
+ * non-persistent request, so nothing is leaked here. */
+void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
+ int tag, MPI_Comm comm, MPI_Status * status)
+{
+ MPI_Request request;
+ request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
+ smpi_mpi_wait(&request, status);
+}
+
+
+
+/* Blocking send, implemented as isend + wait; the send status is discarded
+ * (MPI_STATUS_IGNORE) since MPI defines no useful status for sends here. */
+void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
+ int tag, MPI_Comm comm)
+{
+ MPI_Request request;
+ request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
+ smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+
+}
+
+/* Combined send+receive: both requests are built unstarted, launched
+ * together with startall, then both completed with waitall. Only the
+ * receive's status (stats[1]) is reported back to the caller. */
+void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dst, int sendtag, void *recvbuf, int recvcount,
+ MPI_Datatype recvtype, int src, int recvtag,
+ MPI_Comm comm, MPI_Status * status)
+{
+ MPI_Request requests[2];
+ MPI_Status stats[2];
+
+ requests[0] =
+ smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
+ requests[1] =
+ smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
+ smpi_mpi_startall(2, requests);
+ smpi_mpi_waitall(2, requests, stats);
+ if(status != MPI_STATUS_IGNORE) {
+ // Copy receive status
+ memcpy(status, &stats[1], sizeof(MPI_Status));
+ }
+}
+
+/* Number of received elements: status->count holds a byte count (set in
+ * finish_wait from real_size), so divide by the datatype's size. */
+int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
+{
+ return status->count / smpi_datatype_size(datatype);
+}
+
+/* Complete a finished request: fill in *status (source/tag resolved from
+ * wildcards, MPI_ERR_TRUNCATE on truncation, byte count in status->count),
+ * unserialize non-contiguous receive data for subtyped datatypes, release
+ * the datatype reference, free any detached sender, and finally either free
+ * the request (non-persistent) or just clear its action (persistent).
+ * Detached sends skip the status/datatype bookkeeping entirely. */
+static void finish_wait(MPI_Request * request, MPI_Status * status)
+{
+ MPI_Request req = *request;
+ if(!(req->detached && req->flags & SEND)){
+ if(status != MPI_STATUS_IGNORE) {
+ status->MPI_SOURCE = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
+ status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
+ if(req->truncated)
+ status->MPI_ERROR = MPI_ERR_TRUNCATE;
+ else status->MPI_ERROR = MPI_SUCCESS ;
+ // this handles the case where size in receive differs from size in send
+ // FIXME: really this should just contain the count of receive-type blocks,
+ // right?
+ status->count = req->real_size;
+ }
+
+ print_request("Finishing", req);
+ MPI_Datatype datatype = req->old_type;
+
+ if(datatype->has_subtype == 1){
+ // This part handles the problem of non-contiguous memory:
+ // the unserialization at the reception
+ s_smpi_subtype_t *subtype = datatype->substruct;
+ if(req->flags & RECV) {
+ subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct);
+ }
+ // the temporary serialization buffer is only owned by attached requests
+ if(req->detached == 0) free(req->buf);
+ }
+ smpi_datatype_unuse(datatype);
+ }
+
+ if(req->detached_sender!=NULL){
+ smpi_mpi_request_free(&(req->detached_sender));
+ }
+
+ if(req->flags & NON_PERSISTENT) {
+ smpi_mpi_request_free(request);
+ } else {
+ req->action = NULL;
+ }
+}
+
+/* Nonblocking completion test. Returns 1 (and finalizes the request via
+ * finish_wait, filling *status) if the communication is done, else 0 with an
+ * empty status. A request with no action is considered already complete.
+ * The refcount bump before finish_wait keeps the request alive while
+ * finish_wait drops its own reference. */
+int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
+ int flag;
+
+ //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
+ if ((*request)->action == NULL)
+ flag = 1;
+ else
+ flag = simcall_comm_test((*request)->action);
+ if(flag) {
+ (*request)->refcount++;
+ finish_wait(request, status);
+ }else{
+ smpi_empty_status(status);
+ }
+ return flag;
+}
+
+/* Test whether any of `count` requests has completed. Active requests are
+ * gathered into a dynar of simix actions; `map` translates a dynar position
+ * back to the caller's request index. On completion *index is set and the
+ * request is finalized; with no active requests the call trivially succeeds
+ * with an empty status. Returns 1 on completion (or no active requests),
+ * else 0 with *index == MPI_UNDEFINED. */
+int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
+ MPI_Status * status)
+{
+ xbt_dynar_t comms;
+ int i, flag, size;
+ int* map;
+
+ *index = MPI_UNDEFINED;
+ flag = 0;
+ if(count > 0) {
+ comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ for(i = 0; i < count; i++) {
+ if((requests[i]!=MPI_REQUEST_NULL) && requests[i]->action) {
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
+ }
+ }
+ if(size > 0) {
+ i = simcall_comm_testany(comms);
+ // not MPI_UNDEFINED, as this is a simix return code
+ if(i != -1) {
+ *index = map[i];
+ finish_wait(&requests[*index], status);
+ flag = 1;
+ }
+ }else{
+ //all requests are null or inactive, return true
+ flag=1;
+ smpi_empty_status(status);
+ }
+ xbt_free(map);
+ xbt_dynar_free(&comms);
+ }
+
+ return flag;
+}
+
+
+/* Test all requests: returns 1 only if every non-null request has completed
+ * (each tested individually via smpi_mpi_test). Null requests yield an empty
+ * status. Per-request statuses are copied out unless MPI_STATUSES_IGNORE. */
+int smpi_mpi_testall(int count, MPI_Request requests[],
+ MPI_Status status[])
+{
+ MPI_Status stat;
+ // stage each status in `stat` so a single-request test never writes
+ // directly into the (possibly ignored) output array
+ MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
+ int flag=1;
+ int i;
+ for(i=0; i<count; i++){
+ if(requests[i]!= MPI_REQUEST_NULL){
+ if (smpi_mpi_test(&requests[i], pstat)!=1){
+ flag=0;
+ }
+ }else{
+ smpi_empty_status(pstat);
+ }
+ if(status != MPI_STATUSES_IGNORE) {
+ memcpy(&status[i], pstat, sizeof(*pstat));
+ }
+ }
+ return flag;
+}
+
+/* Blocking probe: loop on iprobe until a matching message shows up,
+ * sleeping a tiny simulated delay between attempts so the simulation
+ * clock can advance instead of spinning. */
+void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
+ int flag=0;
+ //FIXME find another way to avoid busy waiting ?
+ // the issue here is that we have to wait on a nonexistent comm
+ while(flag==0){
+ smpi_mpi_iprobe(source, tag, comm, &flag, status);
+ XBT_DEBUG("Busy Waiting on probing : %d", flag);
+ if(!flag) {
+ simcall_process_sleep(0.0001);
+ }
+ }
+}
+
+void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
+ MPI_Request request =build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
+ comm, NON_PERSISTENT | RECV);
+
+ // behave like a receive, but don't do it
+ smx_rdv_t mailbox;
+
+ print_request("New iprobe", request);
+ // We have to test both mailboxes as we don't know if we will receive one one or another
+ if (sg_cfg_get_int("smpi/async_small_thres")>0){
+ mailbox = smpi_process_mailbox_small();
+ XBT_DEBUG("trying to probe the perm recv mailbox");
+ request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+ }
+ if (request->action==NULL){
+ mailbox = smpi_process_mailbox();
+ XBT_DEBUG("trying to probe the other mailbox");
+ request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+ }
+
+ if(request->action){
+ MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
+ *flag = 1;
+ if(status != MPI_STATUS_IGNORE) {
+ status->MPI_SOURCE = req->src;
+ status->MPI_TAG = req->tag;
+ status->MPI_ERROR = MPI_SUCCESS;
+ status->count = req->real_size;
+ }
+ }
+ else *flag = 0;
+ smpi_mpi_request_free(&request);
+