void smpi_comm_get_name(MPI_Comm comm, char* name, int* len);
int smpi_comm_rank(MPI_Comm comm);
MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key);
+void smpi_comm_use(MPI_Comm comm);
+void smpi_comm_unuse(MPI_Comm comm);
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm);
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
request->real_size=request->size;
smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
request->real_size=request->size;
smpi_datatype_use(request->old_type);
+ smpi_comm_use(request->comm);
//if we are giving back the control to the user without waiting for completion, we have to inject timings
double sleeptime =0.0;
}
if(req->detached == 0) free(req->buf);
}
+ smpi_comm_unuse(req->comm);
smpi_datatype_unuse(datatype);
}
typedef struct s_smpi_mpi_communicator {
MPI_Group group;
+  int refcount; /* number of live references; the communicator is freed when this drops to 0 */
} s_smpi_mpi_communicator_t;
static int smpi_compare_rankmap(const void *a, const void *b)
comm = xbt_new(s_smpi_mpi_communicator_t, 1);
comm->group = group;
smpi_group_use(comm->group);
+ smpi_comm_use(comm);
return comm;
}
void smpi_comm_destroy(MPI_Comm comm)
{
-  xbt_free(comm);
+  smpi_group_unuse(comm->group); /* drop the group reference taken at creation (smpi_comm_new) */
+  smpi_comm_unuse(comm); /* refcounted: memory is only freed once no in-flight request still uses comm */
}
MPI_Group smpi_comm_group(MPI_Comm comm)
}
return group_out ? smpi_comm_new(group_out) : MPI_COMM_NULL;
}
+
+/* Take a reference on 'comm' (and, transitively, on its group) so the
+ * communicator cannot be destroyed while still in use — e.g. by a request
+ * that has not completed yet. Must be paired with smpi_comm_unuse(). */
+void smpi_comm_use(MPI_Comm comm){
+  comm->refcount++;
+  smpi_group_use(comm->group);
+}
+
+/* Release one reference on 'comm' (and on its group). When the last
+ * reference is dropped, the communicator structure itself is freed. */
+void smpi_comm_unuse(MPI_Comm comm){
+  comm->refcount--;
+  smpi_group_unuse(comm->group);
+  if(comm->refcount==0)
+    xbt_free(comm);
+}