- if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] && y[i];
- }
- }
-}
-
-/**
- * sum two vectors element-wise
- *
- * @param a the first vector
- * @param b the second vector
- * @return the second vector is modified in place and contains the element-wise sums
- **/
-void smpi_mpi_sum_func(void *a, void *b, int *length,
- MPI_Datatype * datatype);
-
-void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] + y[i];
- }
- }
-}
-/**
- * compute the min of two vectors element-wise
- **/
-void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype);
-
-void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] < y[i] ? x[i] : y[i];
- }
- }
-}
-/**
- * compute the max of two vectors element-wise
- **/
-void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype);
-
-void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype)
-{
- int i;
- if (*datatype == smpi_mpi_global->mpi_byte) {
- char *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_int) {
- int *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_float) {
- float *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
- } else if (*datatype == smpi_mpi_global->mpi_double) {
- double *x = a, *y = b;
- for (i = 0; i < *length; i++) {
- y[i] = x[i] > y[i] ? x[i] : y[i];
- }
-
- }
-}
-
-
-
-
-/**
- * tell the MPI rank of the calling process (from its SIMIX process id)
- **/
-int smpi_mpi_comm_rank(smpi_mpi_communicator_t comm)
-{
- return comm->index_to_rank_map[smpi_process_index()];
-}
-
-void smpi_process_init(int *argc, char***argv)
-{
- smpi_process_data_t pdata;
-
- // allocate the per-process data and attach it to the SIMIX process
-
- pdata = xbt_new(s_smpi_process_data_t, 1);
- SIMIX_process_set_data(SIMIX_process_self(),pdata);
-
- /* get rank from command line, and remove it from argv */
- pdata->index = atoi( (*argv)[1] );
- DEBUG1("I'm rank %d",pdata->index);
- if (*argc>2) {
- memmove(&(*argv)[1], &(*argv)[2], sizeof(char*) * (*argc - 2));
- (*argv)[ (*argc)-1] = NULL;
- }
- (*argc)--;
-
- pdata->mutex = SIMIX_mutex_init();
- pdata->cond = SIMIX_cond_init();
- pdata->finalize = 0;
-
- pdata->pending_recv_request_queue = xbt_fifo_new();
- pdata->pending_send_request_queue = xbt_fifo_new();
- pdata->received_message_queue = xbt_fifo_new();
-
- pdata->main = SIMIX_process_self();
- pdata->sender = SIMIX_process_create("smpi_sender",
- smpi_sender, pdata,
- SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
- /*props */ NULL);
- pdata->receiver = SIMIX_process_create("smpi_receiver",
- smpi_receiver, pdata,
- SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
- /*props */ NULL);
-
- smpi_global->main_processes[pdata->index] = SIMIX_process_self();
+ if(requests==NULL) return;
+
+ for(i = 0; i < count; i++) {
+ smpi_mpi_start(requests[i]);
+ }
+}
+
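+/**
+ * drop one reference on a request; when the reference count reaches zero,
+ * the request is destroyed and *request is reset to MPI_REQUEST_NULL
+ **/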
+void smpi_mpi_request_free(MPI_Request * request)
+{
+ if((*request) != MPI_REQUEST_NULL){
+ (*request)->refcount--;
+ if((*request)->refcount<0) xbt_die("wrong refcount");
+
+ if((*request)->refcount==0){
+ print_request("Destroying", (*request));
+ xbt_free(*request);
+ *request = MPI_REQUEST_NULL;
+ }else{
+ print_request("Decrementing", (*request));
+ }
+ }else{
+ xbt_die("freeing an already free request");
+ }
+}
+
+
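+/**
+ * build (without starting) the send request of an RMA operation;
+ * a non-null op additionally marks the request as an accumulate
+ **/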
+MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ if(op==MPI_OP_NULL){
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, src, dst, tag,
+ comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
+ }else{
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+ comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
+ request->op = op;
+ }
+ return request;
+}
+
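+/**
+ * build (without starting) the receive request of an RMA operation;
+ * a non-null op additionally marks the request as an accumulate
+ **/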
+MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ if(op==MPI_OP_NULL){
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+ comm, RMA | NON_PERSISTENT | RECV | PREPARED);
+ }else{
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+ comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
+ request->op = op;
+ }
+ return request;
+}
+
+
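+/**
+ * build a persistent send request, to be started later with smpi_mpi_start
+ **/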
+MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, PERSISTENT | ISEND | SEND | PREPARED);
+ return request;
+}
+
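+/**
+ * build and immediately start a non-blocking send
+ **/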
+MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM?(void*)0:buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SEND);
+ smpi_mpi_start(request);
+ return request;
+}
+
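+/**
+ * build and immediately start a non-blocking synchronous send
+ **/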
+MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ smpi_mpi_start(request);
+ return request;
+}
+
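+/**
+ * build a persistent receive request, to be started later with smpi_mpi_start
+ **/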
+MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, PERSISTENT | RECV | PREPARED);
+ return request;
+}
+
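+/**
+ * build and immediately start a non-blocking receive
+ **/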
+MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
+ int src, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
+ comm, NON_PERSISTENT | RECV);
+ smpi_mpi_start(request);
+ return request;
+}
+
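+/**
+ * blocking receive: start a non-blocking receive and wait for its completion
+ **/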
+void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
+ int tag, MPI_Comm comm, MPI_Status * status)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
+ smpi_mpi_wait(&request, status);
+ request = NULL;
+}
+
+
+
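+/**
+ * blocking send: start the send request and wait for its completion
+ **/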
+void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
+ int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | SEND);
+
+ smpi_mpi_start(request);
+ smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+ request = NULL;
+}
+
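+/**
+ * blocking synchronous send: start the request and wait for its completion
+ **/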
+void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+ request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
+ comm, NON_PERSISTENT | SSEND | SEND);
+
+ smpi_mpi_start(request);
+ smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
+ request = NULL;
+}
+
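+/**
+ * combined send and receive; degenerates to a local datatype copy when both
+ * peers are the calling process, otherwise both requests are started and
+ * waited on together
+ **/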
+void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ int dst, int sendtag, void *recvbuf, int recvcount,
+ MPI_Datatype recvtype, int src, int recvtag,
+ MPI_Comm comm, MPI_Status * status)
+{
+ MPI_Request requests[2];
+ MPI_Status stats[2];
+ int myid=smpi_process_index();
+ if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype);
+ return;
+ }
+ requests[0] =
+ smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
+ requests[1] =
+ smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
+ smpi_mpi_startall(2, requests);
+ smpi_mpi_waitall(2, requests, stats);
+ smpi_mpi_request_free(&requests[0]);
+ smpi_mpi_request_free(&requests[1]);
+ if(status != MPI_STATUS_IGNORE) {
+ // Copy receive status
+ *status = stats[1];
+ }
+}
+
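+/**
+ * number of received elements, computed from the byte count stored in the
+ * status and the size of the datatype
+ **/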
+int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
+{
+ return status->count / smpi_datatype_size(datatype);
+}
+
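+/**
+ * complete a finished communication: fill the status, unserialize
+ * non-contiguous data or apply the accumulate operation if needed,
+ * and release the request
+ **/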
+static void finish_wait(MPI_Request * request, MPI_Status * status)
+{
+ MPI_Request req = *request;
+ smpi_empty_status(status);
+
+ if(!(req->detached && req->flags & SEND)
+ && !(req->flags & PREPARED)){
+ if(status != MPI_STATUS_IGNORE) {
+ int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
+ status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
+ status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+ // this handles the case where the size in the receive differs from the size in the send
+ // FIXME: really this should just contain the count of receive-type blocks,
+ // right?
+ status->count = req->real_size;
+ }
+
+ print_request("Finishing", req);
+ MPI_Datatype datatype = req->old_type;
+
+ if((req->flags & ACCUMULATE) || (datatype->has_subtype == 1)){
+ if (!smpi_process_get_replaying()){
+ if( smpi_privatize_global_variables
+ && ((char*)req->old_buf >= start_data_exe)
+ && ((char*)req->old_buf < start_data_exe + size_data_exe )
+ ){
+ XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
+ smpi_switch_data_segment(smpi_process_index());
+ }
+ }
+
+ if(datatype->has_subtype == 1){
+ // This part handles the problem of non-contiguous memory:
+ // the data is unserialized on the receive side
+ s_smpi_subtype_t *subtype = datatype->substruct;
+ if(req->flags & RECV)
+ subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct, req->op);
+ if(req->detached == 0) free(req->buf);
+ }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
+ int n =req->real_size/smpi_datatype_size(datatype);
+ smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
+ }
+ }
+ smpi_comm_unuse(req->comm);
+ smpi_datatype_unuse(datatype);
+
+ }
+
+#ifdef HAVE_TRACING
+ if (TRACE_smpi_view_internals()) {
+ if(req->flags & RECV){
+ int rank = smpi_process_index();
+ int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
+ TRACE_smpi_recv(rank, src_traced, rank);
+ }
+ }
+#endif
+
+ if(req->detached_sender!=NULL){
+ smpi_mpi_request_free(&(req->detached_sender));
+ }
+ if(req->flags & PERSISTENT)
+ req->action = NULL;
+ req->flags |= FINISHED;
+
+ smpi_mpi_request_free(request);
+
+}
+
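+/**
+ * non-blocking test of a single request; sleeps a bit (longer after each
+ * failed test) so that simulated time can advance
+ **/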
+int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
+ int flag;
+
+ //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
+
+ //sleep a bit to let simulated time advance and avoid deadlocks
+ //nsleeps multiplies the sleep time; each failed test increases it, to speed up the simulation
+ static int nsleeps = 1;
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
+
+ smpi_empty_status(status);
+ flag = 1;
+ if (!((*request)->flags & PREPARED)) {
+ if ((*request)->action != NULL)
+ flag = simcall_comm_test((*request)->action);
+ if (flag) {
+ finish_wait(request, status);
+ nsleeps=1;//reset the number of sleeps we will do next time
+ if (*request != MPI_REQUEST_NULL && !((*request)->flags & PERSISTENT))
+ *request = MPI_REQUEST_NULL;
+ }else{
+ nsleeps++;
+ }
+ }
+ return flag;
+}
+
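+/**
+ * test an array of requests and report in *index the position of a completed
+ * one; returns 1 when one of them completed, or when all of them are null
+ * or inactive
+ **/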
+int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
+ MPI_Status * status)
+{
+ xbt_dynar_t comms;
+ int i, flag, size;
+ int* map;
+
+ *index = MPI_UNDEFINED;
+ flag = 0;
+ comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
+ map = xbt_new(int, count);
+ size = 0;
+ for(i = 0; i < count; i++) {
+ if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action &&
+ !(requests[i]->flags & PREPARED)) {
+ xbt_dynar_push(comms, &requests[i]->action);
+ map[size] = i;
+ size++;
+ }
+ }
+ if(size > 0) {
+ //nsleeps multiplies the sleep time; each failed testany increases it, to speed up the simulation
+ static int nsleeps = 1;
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
+
+ i = simcall_comm_testany(comms);
+ // not MPI_UNDEFINED, as this is a simix return code
+ if(i != -1) {
+ *index = map[i];
+ finish_wait(&requests[*index], status);
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
+ requests[*index] = MPI_REQUEST_NULL;
+ flag = 1;
+ nsleeps=1;
+ }else{
+ nsleeps++;
+ }
+ }else{
+ //all requests are null or inactive, return true
+ flag=1;
+ smpi_empty_status(status);
+ }
+ xbt_free(map);
+ xbt_dynar_free(&comms);
+
+ return flag;
+}
+
+
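+/**
+ * test all requests; returns 1 only if every active request has completed
+ **/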
+int smpi_mpi_testall(int count, MPI_Request requests[],
+ MPI_Status status[])
+{
+ MPI_Status stat;
+ MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
+ int flag=1;
+ int i;
+ for(i=0; i<count; i++){
+ if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
+ if (smpi_mpi_test(&requests[i], pstat)!=1){
+ flag=0;
+ }else{
+ requests[i]=MPI_REQUEST_NULL;
+ }
+ }else{
+ smpi_empty_status(pstat);
+ }
+ if(status != MPI_STATUSES_IGNORE) {
+ status[i] = *pstat;
+ }
+ }
+ return flag;
+}
+
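+/**
+ * blocking probe, implemented by busy-waiting on smpi_mpi_iprobe
+ **/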
+void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
+ int flag=0;
+ //FIXME: find another way to avoid busy waiting?
+ // the issue here is that we have to wait on a nonexistent comm
+ while(flag==0){
+ smpi_mpi_iprobe(source, tag, comm, &flag, status);
+ XBT_DEBUG("Busy Waiting on probing : %d", flag);
+ }
+}
+
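+/**
+ * non-blocking probe: posts a receive-like request on the small-message and
+ * regular mailboxes and reports through *flag whether a matching message is
+ * pending; the status is filled from the matched request
+ **/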
+void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
+
+ MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag,
+ comm, PERSISTENT | RECV);
+
+ //to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
+ //nsleeps multiplies the sleep time; each failed iprobe increases it, to speed up the simulation
+ static int nsleeps = 1;
+ if(smpi_iprobe_sleep > 0) simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
+ // behave like a receive, but don't do it
+ smx_rdv_t mailbox;
+
+ print_request("New iprobe", request);
+ // We have to test both mailboxes, as we don't know in which one the message will arrive
+ if (sg_cfg_get_int("smpi/async_small_thres")>0){
+ mailbox = smpi_process_mailbox_small();
+ XBT_DEBUG("trying to probe the perm recv mailbox");
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);
+ }
+ if (request->action==NULL){
+ mailbox = smpi_process_mailbox();
+ XBT_DEBUG("trying to probe the other mailbox");
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
+ }
+
+ if(request->action){
+ MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
+ *flag = 1;
+ if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
+ status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
+ status->MPI_TAG = req->tag;
+ status->MPI_ERROR = MPI_SUCCESS;
+ status->count = req->real_size;
+ }
+ nsleeps=1;//reset the number of sleeps we will do next time
+ }
+ else {
+ *flag = 0;
+ nsleeps++;
+ }
+ smpi_mpi_request_free(&request);
+