1 /* Copyright (c) 2010-2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
10 #include "smpi_mpi_dt_private.h"
12 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
13 "Logging specific to SMPI (comm)");
17 /* Support for cartesian topology was added, but there are 2 other types of
18 * topology: graph and dist graph. In order to support them, we have to add a
19 * field MPIR_Topo_type, and replace the MPI_Topology field by a union. */
/* Internal representation of an MPI communicator.
 * NOTE(review): this excerpt is missing some struct members (original lines
 * 22 and 25 are absent) -- presumably the MPI_Group and a reference counter,
 * since comm->group and comm->refcount are used below. Confirm against the
 * full file. */
21 typedef struct s_smpi_mpi_communicator {
23 MPIR_Topo_type topoType;  /* discriminator for the planned topology union */
24 MPI_Topology topo; // to be replaced by a union once graph/dist-graph topologies are supported
26 } s_smpi_mpi_communicator_t;
/* qsort() comparator for the rankmap built in smpi_comm_split().
 * Each element is a pair of ints: { original rank, key }.
 * NOTE(review): the actual comparison logic is not visible in this excerpt;
 * presumably it orders entries by key (MPI_Comm_split semantics), breaking
 * ties by rank -- confirm against the full file. */
28 static int smpi_compare_rankmap(const void *a, const void *b)
30 const int* x = (const int*)a;  /* a/b point at 2-int {rank, key} pairs */
31 const int* y = (const int*)b;
/* Allocate a new communicator wrapping the given group and topology.
 * Takes a reference on the group (smpi_group_use) so it outlives the caller's
 * handle; smpi_comm_destroy() releases it.
 * NOTE(review): field assignments (group, topo, refcount) and the return
 * statement are not visible in this excerpt. */
48 MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
52   comm = xbt_new(s_smpi_mpi_communicator_t, 1);
54   smpi_group_use(comm->group);
/* Release one reference on the communicator: drop the group reference,
 * destroy the topology (topologies are not reference-counted), then
 * decrement the communicator's own refcount via smpi_comm_unuse(),
 * which frees it when the count reaches zero. */
60 void smpi_comm_destroy(MPI_Comm comm)
62   smpi_group_unuse(comm->group);
63   smpi_topo_destroy(comm->topo); // there's no use count on topos
64   smpi_comm_unuse(comm);
/* Accessor: return the group associated with this communicator.
 * NOTE(review): body not visible in this excerpt. */
67 MPI_Group smpi_comm_group(MPI_Comm comm)
/* Accessor: return the communicator's topology, guarding against
 * MPI_COMM_NULL. NOTE(review): the return statements (topo for a valid
 * comm, presumably NULL otherwise) are not visible in this excerpt. */
72 MPI_Topology smpi_comm_topo(MPI_Comm comm) {
73   if (comm != MPI_COMM_NULL)
/* Number of processes in the communicator: delegates to the size of
 * its underlying group. */
78 int smpi_comm_size(MPI_Comm comm)
80   return smpi_group_size(smpi_comm_group(comm));
/* Rank of the calling process in this communicator: maps the global
 * process index onto the communicator's group. */
83 int smpi_comm_rank(MPI_Comm comm)
85   return smpi_group_rank(smpi_comm_group(comm), smpi_process_index());
/* MPI_Comm_get_name backend: MPI_COMM_WORLD gets the fixed name "WORLD";
 * any other communicator is named by its pointer value, formatted into
 * at most MPI_MAX_NAME_STRING bytes.
 * NOTE(review): the branch setting *len for the "WORLD" case is not visible
 * in this excerpt -- presumably *len = 5 there; confirm against the full
 * file. Caller must provide a buffer of at least MPI_MAX_NAME_STRING bytes. */
88 void smpi_comm_get_name (MPI_Comm comm, char* name, int* len)
90   if(comm == MPI_COMM_WORLD) {
91     strcpy(name, "WORLD");
94     *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm);
/* MPI_Comm_split backend: partition `comm` into disjoint sub-communicators.
 * Processes passing the same `color` end up in the same new communicator,
 * ordered by `key` (ties broken by original rank); a process passing
 * MPI_UNDEFINED gets MPI_COMM_NULL back.
 *
 * Algorithm (root-driven):
 *   1. Every rank sends its {color, key} pair to rank 0 via a gather.
 *   2. Rank 0 scans recvbuf color by color, building `rankmap`, an array of
 *      {original rank, key} pairs per color, marking consumed entries with
 *      MPI_UNDEFINED so each color is processed once.
 *   3. Rank 0 sorts each color's rankmap by key, builds the new group, and
 *      ships the group pointer to every member (point-to-point, MPI_PTR).
 *   4. Non-root members receive the group pointer and take their own copy.
 *
 * NOTE(review): this excerpt is heavily elided -- the `if(rank == 0)` guards
 * around the root-only section, loop closings, buffer frees (sendbuf,
 * recvbuf, rankmap, requests) and the per-color reset of `count`/`reqs` are
 * not visible. Also note smpi_mpi_waitall() is passed MPI_STATUS_IGNORE
 * where the MPI-level convention is MPI_STATUSES_IGNORE; verify the SMPI
 * internal API accepts this. */
98 MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
100   int system_tag = 123;  /* internal tag for the group-distribution messages */
101   int index, rank, size, i, j, count, reqs;
105   MPI_Group group, group_root, group_out;
106   MPI_Request* requests;
108   group_root = group_out = NULL;
109   group = smpi_comm_group(comm);
110   rank = smpi_comm_rank(comm);
111   size = smpi_comm_size(comm);
112   /* Gather all colors and keys on rank 0 */
113   sendbuf = xbt_new(int, 2);
117   recvbuf = xbt_new(int, 2 * size);  /* rank 0: {color, key} per process */
121   smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm);
123   /* Do the actual job */
125   rankmap = xbt_new(int, 2 * size);
126   for(i = 0; i < size; i++) {
      /* skip processes that opted out (MPI_UNDEFINED) or whose color was
         already consumed by an earlier iteration */
127     if(recvbuf[2 * i] == MPI_UNDEFINED) {
      /* collect every later process sharing color recvbuf[2*i] */
131     for(j = i + 1; j < size; j++) {
132       if(recvbuf[2 * i] == recvbuf[2 * j]) {
133         recvbuf[2 * j] = MPI_UNDEFINED;  /* consume: do not revisit this entry */
134         rankmap[2 * count] = j;              /* original rank */
135         rankmap[2 * count + 1] = recvbuf[2 * j + 1];  /* its key */
139     /* Add self in the group */
140     recvbuf[2 * i] = MPI_UNDEFINED;
141     rankmap[2 * count] = i;
142     rankmap[2 * count + 1] = recvbuf[2 * i + 1];
      /* order members by key (rank breaks ties) -- see smpi_compare_rankmap */
144     qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
145     group_out = smpi_group_new(count);
147       group_root = group_out; /* Save root's group */
149     for(j = 0; j < count; j++) {
150       //increment refcounter in order to avoid freeing the group too quick before copy
151       index = smpi_group_index(group, rankmap[2 * j]);
152       smpi_group_set_mapping(group_out, index, j);
      /* ship the new group pointer to every member except rank 0 itself */
154     requests = xbt_new(MPI_Request, count);
156     for(j = 0; j < count; j++) {
157       if(rankmap[2 * j] != 0) {
158         requests[reqs] = smpi_isend_init(&group_out, 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
162     smpi_mpi_startall(reqs, requests);
    /* NOTE(review): MPI_STATUSES_IGNORE is the usual constant for waitall;
       confirm SMPI's internal smpi_mpi_waitall treats MPI_STATUS_IGNORE the
       same way. */
163     smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
167     group_out = group_root; /* exit with root's group */
  /* non-root members: receive the group pointer from rank 0 and copy it */
169   if(color != MPI_UNDEFINED) {
170     smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
172       group_out=smpi_group_copy(group_out);
174   } /* otherwise, exit with group_out == NULL */
  /* no topology is attached to the split communicator */
176   return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
/* Take a reference on the communicator (increments its refcount).
 * NOTE(review): body not visible in this excerpt. */
179 void smpi_comm_use(MPI_Comm comm){
/* Drop a reference on the communicator; frees it once the refcount hits
 * zero. NOTE(review): the decrement and the free itself are not visible in
 * this excerpt. */
183 void smpi_comm_unuse(MPI_Comm comm){
185   if(comm->refcount==0)