XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  return MPI_SUCCESS;
}
int SMPI_MPI_Finalize()
{
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node; it should send an abort signal
// to all hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = comm->size;
  }

  return retval;
}
int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {
    retval = MPI_ERR_ARG;
  } else {
    *rank = smpi_mpi_comm_rank(comm);
  }

  return retval;
}
int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
{
  int retval = MPI_SUCCESS;

  if (NULL == datatype) {
    retval = MPI_ERR_TYPE;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = datatype->size;
  }

  return retval;
}
int SMPI_MPI_Barrier(MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else {
    retval = smpi_mpi_barrier(comm);
  }

  return retval;
}
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
  }

  return retval;
}
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status * status)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  // blocking receive: post the nonblocking receive, wait on it, release the request
  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
  }

  return retval;
}
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  // blocking send: post the nonblocking send, wait on it, release the request
  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
{
  return smpi_mpi_wait(*request, status);
}
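/* A usage sketch (hypothetical user code, not part of this file) for the
 * point-to-point calls above, assuming MPI_COMM_WORLD and MPI_INT are set up
 * as usual by the SMPI runtime:
 *
 *   int value = 0;
 *   MPI_Request req;
 *   MPI_Status status;
 *   if (rank == 0) {
 *     value = 17;
 *     SMPI_MPI_Isend(&value, 1, MPI_INT, 1, 42, MPI_COMM_WORLD, &req);
 *     SMPI_MPI_Wait(&req, &status);
 *   } else if (rank == 1) {
 *     SMPI_MPI_Irecv(&value, 1, MPI_INT, 0, 42, MPI_COMM_WORLD, &req);
 *     SMPI_MPI_Wait(&req, &status);
 *   }
 *
 * SMPI_MPI_Send() and SMPI_MPI_Recv() are the blocking equivalents: they
 * create the request, start it, wait on it and release it in one call.
 */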
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);

  if (rank == root) {
    // the root posts a single send to the next rank; the 'forward' field lets
    // the runtime propagate the message to the remaining comm->size - 1 ranks
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
    request->forward = comm->size - 1;
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }

  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);

  return retval;
}
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                    MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int size;
  int i;
  smpi_mpi_request_t *tabrequest;

  rank = smpi_mpi_comm_rank(comm);
  size = comm->size;

  tabrequest = xbt_malloc(size * sizeof(smpi_mpi_request_t));

  if (rank != root) { // if I am not root, simply send my buffer to root
    retval = smpi_create_request(sendbuf, count, datatype,
                                 rank, root, 0, comm, &(tabrequest[rank]));
    smpi_mpi_isend(tabrequest[rank]);
    smpi_mpi_wait(tabrequest[rank], MPI_STATUS_IGNORE);
    //printf("DEBUG: rank %d sent my sendbuf to root (rank %d)\n", rank, root);
  } else {
    // I am the root: wait for all buffers by creating one receive request per peer.
    // 'request->forward = size - 1;' cannot be used here (it would propagate
    // size - 1 receive requests), because the values must be combined with op
    // as soon as each receiving request matches.
    for (i = 0; i < comm->size; i++) {
      if (rank != i) { // except for me
        // reminder: for smpi_create_request() the src is always the sending process
        retval = smpi_create_request(recvbuf, count, datatype, MPI_ANY_SOURCE, root,
                                     0, comm, &(tabrequest[i]));
        if (NULL != tabrequest[i] && MPI_SUCCESS == retval) {
          smpi_mpi_irecv(tabrequest[i]);
        }
      }
    }
    // now, wait for completion of all irecv's.
    // FIXME: we should implement smpi_wait_all for a more asynchronous behavior
    for (i = 0; i < comm->size; i++) {
      if (rank != i) { // except for me
        smpi_mpi_wait(tabrequest[i], MPI_STATUS_IGNORE);

        // FIXME: the core part (combining the received values with op) is still to be written
        fprintf(stderr, "[smpi] %s:%d : MPI_Reduce *Not yet implemented*.\n",
                __FILE__, __LINE__);
      }
    }
  }

  // only release the requests this rank actually created
  for (i = 0; i < comm->size; i++) {
    if ((rank == root) ? (i != rank) : (i == rank))
      xbt_mallocator_release(smpi_global->request_mallocator, tabrequest[i]);
  }
  xbt_free(tabrequest);

  return retval;
}
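/* A possible sketch for the missing combine step above (not part of the
 * original code): it assumes (a) each incoming contribution is first received
 * into a scratch buffer rather than directly into recvbuf, which the current
 * code overwrites on every receive, and (b) that MPI_Op carries an
 * MPI_User_function-style callback, called op->func here, with the standard
 * (invec, inoutvec, len, datatype) argument order. Both are assumptions about
 * this codebase, not established SMPI API.
 *
 *   void *scratch = xbt_malloc(count * datatype->size);
 *   memcpy(recvbuf, sendbuf, count * datatype->size);  // seed with root's own values
 *   for (each peer i, after smpi_mpi_wait() on its receive into scratch) {
 *     op->func(scratch, recvbuf, &count, &datatype);   // recvbuf = scratch (op) recvbuf
 *   }
 *   xbt_free(scratch);
 */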
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
{
  const int *x = a;
  const int *y = b;
  // each element is an (original rank, key) pair: order by key, break ties by rank
  if (x[1] != y[1])
    return (x[1] < y[1]) ? -1 : 1;
  return (x[0] < y[0]) ? -1 : (x[0] > y[0]) ? 1 : 0;
}
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm * comm_out)
{
  int retval = MPI_SUCCESS;

  int index, rank;
  smpi_mpi_request_t request;
  int colorkey[2];
  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // the root node (rank 0 of comm) does most of the real work
  if (0 == rank) {
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];
    int i, j, count, indextmp;
    smpi_mpi_communicator_t tempcomm = NULL;

    colormap[0] = color;
    keymap[0] = key;
    // FIXME: use scatter/gather or similar instead of individual comms
    // gather each peer's (color, key) pair, indexed by its source rank
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
    // group ranks by color: each pass over i picks up one not-yet-handled color
    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {
        continue;  // no new communicator requested, or already handled
      }
      // make a list of nodes with the current color and sort it by keys;
      // the color is saved first, since the scan below resets matching
      // entries (including colormap[i] itself) to MPI_UNDEFINED
      int current_color = colormap[i];
      count = 0;
      for (j = i; j < comm->size; j++) {
        if (current_color == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;
          rankkeymap[count * 2] = j;              // original rank in comm
          rankkeymap[count * 2 + 1] = keymap[j];  // its key
          count++;
        }
      }
      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);
      // build the new communicator object for this color group
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      // hand the new communicator to its members: rank 0 of comm keeps its
      // copy directly, the others get it through a message's data field
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        } else {
          *comm_out = tempcomm;
        }
      }
    }
  } else {
    // non-root: send my (color, key) pair to rank 0 ...
    colorkey[0] = color;
    colorkey[1] = key;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    // ... and, unless my color is MPI_UNDEFINED, receive the new communicator
    // back through the message's data field
    if (MPI_UNDEFINED != color) {
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
    }
  }

  return retval;
}
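/* A usage sketch (hypothetical user code, not part of this file): split the
 * parent communicator by rank parity, ordering ranks inside each new
 * communicator by their old rank; a process passing MPI_UNDEFINED as color
 * gets no new communicator.
 *
 *   MPI_Comm newcomm;
 *   SMPI_MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &newcomm);
 */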
double SMPI_MPI_Wtime(void)
{
  // simulated time, as maintained by the SimGrid kernel
  return SIMIX_get_clock();
}
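/* A usage sketch (hypothetical user code): SMPI_MPI_Wtime() returns the
 * simulated clock, so elapsed simulated time is measured the usual way:
 *
 *   double start = SMPI_MPI_Wtime();
 *   // ... communication and computation ...
 *   double elapsed = SMPI_MPI_Wtime() - start;
 */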