-
+/* $Id$ */
#include "private.h"
#include "smpi_coll_private.h"
+#include "smpi_mpi_dt_private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
"Logging specific to SMPI (mpi)");
-int SMPI_MPI_Init(int *argc, char ***argv)
-{
+/* MPI User level calls */
+
+int MPI_Init(int* argc, char*** argv) {
smpi_process_init(argc, argv);
smpi_bench_begin();
return MPI_SUCCESS;
}
-int SMPI_MPI_Finalize()
-{
+int MPI_Finalize(void) {
smpi_bench_end();
- smpi_process_finalize();
+ smpi_process_destroy();
return MPI_SUCCESS;
}
-// right now this just exits the current node, should send abort signal to all
-// hosts in the communicator (TODO)
-int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
-{
- smpi_exit(errorcode);
- return 0;
+int MPI_Init_thread(int* argc, char*** argv, int required, int* provided) {
+ if(provided != NULL) {
+ *provided = MPI_THREAD_MULTIPLE;
+ }
+ return MPI_Init(argc, argv);
}
-int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
-{
- int retval = MPI_SUCCESS;
+int MPI_Query_thread(int* provided) {
+ int retval;
smpi_bench_end();
-
- if (NULL == comm) {
- retval = MPI_ERR_COMM;
- } else if (NULL == size) {
+ if(provided == NULL) {
retval = MPI_ERR_ARG;
} else {
- *size = comm->size;
+ *provided = MPI_THREAD_MULTIPLE;
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
-{
- int retval = MPI_SUCCESS;
+int MPI_Is_thread_main(int* flag) {
+ int retval;
smpi_bench_end();
-
- if (NULL == comm) {
- retval = MPI_ERR_COMM;
- } else if (NULL == rank) {
+ if(flag == NULL) {
retval = MPI_ERR_ARG;
} else {
- *rank = smpi_mpi_comm_rank(comm);
+ *flag = smpi_process_index() == 0;
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
-{
- int retval = MPI_SUCCESS;
+int MPI_Abort(MPI_Comm comm, int errorcode) {
+ smpi_bench_end();
+ smpi_process_destroy();
+ // FIXME: should kill all processes in comm instead
+ SIMIX_process_kill(SIMIX_process_self());
+ return MPI_SUCCESS;
+}
+
+double MPI_Wtime(void) {
+ double time;
smpi_bench_end();
+ time = SIMIX_get_clock();
+ smpi_bench_begin();
+ return time;
+}
- if (NULL == datatype) {
+int MPI_Type_size(MPI_Datatype datatype, size_t* size) {
+ int retval;
+
+ smpi_bench_end();
+ if(datatype == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
- } else if (NULL == size) {
+ } else if(size == NULL) {
retval = MPI_ERR_ARG;
} else {
- *size = datatype->size;
+ *size = smpi_datatype_size(datatype);
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-int SMPI_MPI_Barrier(MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- int arity=4;
+int MPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint* lb, MPI_Aint* extent) {
+ int retval;
smpi_bench_end();
-
- if (NULL == comm) {
- retval = MPI_ERR_COMM;
+ if(datatype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(lb == NULL || extent == NULL) {
+ retval = MPI_ERR_ARG;
} else {
-
- /*
- * original implemantation:
- * retval = smpi_mpi_barrier(comm);
- * this one is unrealistic: it just cond_waits, means no time.
- */
- retval = nary_tree_barrier( comm, arity );
+ retval = smpi_datatype_extent(datatype, lb, extent);
}
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint* disp) {
+ int retval;
+ smpi_bench_end();
+ if(datatype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(disp == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *disp = smpi_datatype_lb(datatype);
+ retval = MPI_SUCCESS;
+ }
smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Type_ub(MPI_Datatype datatype, MPI_Aint* disp) {
+ int retval;
+ smpi_bench_end();
+ if(datatype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(disp == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *disp = smpi_datatype_ub(datatype);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
return retval;
}
-int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
- int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = MPI_SUCCESS;
+int MPI_Op_create(MPI_User_function* function, int commute, MPI_Op* op) {
+ int retval;
smpi_bench_end();
+ if(function == NULL || op == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *op = smpi_op_new(function, commute);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Op_free(MPI_Op* op) {
+ int retval;
- retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
- request);
- if (NULL != *request && MPI_SUCCESS == retval) {
- retval = smpi_mpi_irecv(*request);
+ smpi_bench_end();
+ if(op == NULL) {
+ retval = MPI_ERR_ARG;
+ } else if(*op == MPI_OP_NULL) {
+ retval = MPI_ERR_OP;
+ } else {
+ smpi_op_destroy(*op);
+ *op = MPI_OP_NULL;
+ retval = MPI_SUCCESS;
}
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Group_free(MPI_Group *group) {
+ int retval;
+ smpi_bench_end();
+ if(group == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ smpi_group_destroy(*group);
+ *group = MPI_GROUP_NULL;
+ retval = MPI_SUCCESS;
+ }
smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Group_size(MPI_Group group, int* size) {
+ int retval;
+ smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(size == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *size = smpi_group_size(group);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
return retval;
}
-int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
- int tag, MPI_Comm comm, MPI_Status * status)
-{
- int retval = MPI_SUCCESS;
- smpi_mpi_request_t request;
+int MPI_Group_rank(MPI_Group group, int* rank) {
+ int retval;
smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(rank == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *rank = smpi_group_rank(group, smpi_process_index());
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Group_translate_ranks (MPI_Group group1, int n, int* ranks1, MPI_Group group2, int* ranks2) {
+ int retval, i, index;
- retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
- &request);
- if (NULL != request && MPI_SUCCESS == retval) {
- retval = smpi_mpi_irecv(request);
- if (MPI_SUCCESS == retval) {
- retval = smpi_mpi_wait(request, status);
+ smpi_bench_end();
+ if(group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else {
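+    /* Map each listed rank of group1 to its global process index, then look that index up in group2
+       (MPI_UNDEFINED when the process is not a member of group2) */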
+ for(i = 0; i < n; i++) {
+ index = smpi_group_index(group1, ranks1[i]);
+ ranks2[i] = smpi_group_rank(group2, index);
}
- xbt_mallocator_release(smpi_global->request_mallocator, request);
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
- int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = MPI_SUCCESS;
+int MPI_Group_compare(MPI_Group group1, MPI_Group group2, int* result) {
+ int retval;
smpi_bench_end();
-
- retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
- request);
- if (NULL != *request && MPI_SUCCESS == retval) {
- retval = smpi_mpi_isend(*request);
+ if(group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(result == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *result = smpi_group_compare(group1, group2);
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
- int tag, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- smpi_mpi_request_t request;
+int MPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group* newgroup) {
+ int retval, i, proc1, proc2, size, size2;
smpi_bench_end();
-
- retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
- &request);
- if (NULL != request && MPI_SUCCESS == retval) {
- retval = smpi_mpi_isend(request);
- if (MPI_SUCCESS == retval) {
- smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ if(group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
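+    /* The union holds all of group1 plus every member of group2 not already in group1; count the latter to size it */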
+ size = smpi_group_size(group1);
+ size2 = smpi_group_size(group2);
+ for(i = 0; i < size2; i++) {
+ proc2 = smpi_group_index(group2, i);
+ proc1 = smpi_group_rank(group1, proc2);
+ if(proc1 == MPI_UNDEFINED) {
+ size++;
+ }
}
- xbt_mallocator_release(smpi_global->request_mallocator, request);
+ if(size == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+    } else {
+      *newgroup = smpi_group_new(size);
+      /* Map all of group1 first, then the members of group2 that are not already in group1 */
+      size2 = smpi_group_size(group1);
+      for(i = 0; i < size2; i++) {
+        proc1 = smpi_group_index(group1, i);
+        smpi_group_set_mapping(*newgroup, proc1, i);
+      }
+      for(i = 0; i < smpi_group_size(group2); i++) {
+        proc2 = smpi_group_index(group2, i);
+        if(smpi_group_rank(group1, proc2) == MPI_UNDEFINED) {
+          smpi_group_set_mapping(*newgroup, proc2, size2);
+          size2++;
+        }
+      }
+    }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
}
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group* newgroup) {
+ int retval, i, proc1, proc2, size, size2;
+ smpi_bench_end();
+ if(group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+    /* The intersection keeps the members of group1 that also appear in group2, in group1 order */
+    size = 0;
+    size2 = smpi_group_size(group1);
+    for(i = 0; i < size2; i++) {
+      proc1 = smpi_group_index(group1, i);
+      proc2 = smpi_group_rank(group2, proc1);
+      if(proc2 != MPI_UNDEFINED) {
+        size++;
+      }
+    }
+    if(size == 0) {
+      *newgroup = MPI_GROUP_EMPTY;
+    } else {
+      *newgroup = smpi_group_new(size);
+      size = 0; /* reuse size as the next free slot in the new group */
+      for(i = 0; i < size2; i++) {
+        proc1 = smpi_group_index(group1, i);
+        proc2 = smpi_group_rank(group2, proc1);
+        if(proc2 != MPI_UNDEFINED) {
+          smpi_group_set_mapping(*newgroup, proc1, size);
+          size++;
+        }
+      }
+    }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group* newgroup) {
+ int retval, i, proc1, proc2, size, size2;
+ smpi_bench_end();
+ if(group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ size = size2 = smpi_group_size(group1);
+ for(i = 0; i < size2; i++) {
+ proc1 = smpi_group_index(group1, i);
+ proc2 = smpi_group_rank(group2, proc1);
+ if(proc2 != MPI_UNDEFINED) {
+ size--;
+ }
+ }
+ if(size == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+ } else {
+      *newgroup = smpi_group_new(size);
+      size = 0; /* reuse size as the next free slot in the new group */
+      for(i = 0; i < size2; i++) {
+        proc1 = smpi_group_index(group1, i);
+        proc2 = smpi_group_rank(group2, proc1);
+        if(proc2 == MPI_UNDEFINED) { /* proc1 is not in group2: keep it */
+          smpi_group_set_mapping(*newgroup, proc1, size);
+          size++;
+        }
+      }
+ }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
return retval;
}
-/**
- * MPI_Wait and friends
- **/
-int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
-{
- return smpi_mpi_wait(*request, status);
+int MPI_Group_incl(MPI_Group group, int n, int* ranks, MPI_Group* newgroup) {
+ int retval, i, index;
+
+ smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
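+    /* Keep only the n listed ranks, in the order they are given */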
+ if(n == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+ } else if(n == smpi_group_size(group)) {
+ *newgroup = group;
+ } else {
+ *newgroup = smpi_group_new(n);
+ for(i = 0; i < n; i++) {
+ index = smpi_group_index(group, ranks[i]);
+ smpi_group_set_mapping(*newgroup, index, i);
+ }
+ }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
-{
- return smpi_mpi_waitall(count, requests, status);
+int MPI_Group_excl(MPI_Group group, int n, int* ranks, MPI_Group* newgroup) {
+  int retval, i, size, rank, newrank, index;
+
+ smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
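+    /* The new group keeps every rank of group that does not appear in ranks */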
+ if(n == 0) {
+ *newgroup = group;
+ } else if(n == smpi_group_size(group)) {
+ *newgroup = MPI_GROUP_EMPTY;
+ } else {
+ size = smpi_group_size(group) - n;
+ *newgroup = smpi_group_new(size);
+      newrank = 0; /* next free slot in the new group */
+      for(rank = 0; rank < smpi_group_size(group); rank++) {
+        for(i = 0; i < n; i++) {
+          if(ranks[i] == rank) {
+            break;
+          }
+        }
+        if(i >= n) { /* rank is not excluded */
+          index = smpi_group_index(group, rank);
+          smpi_group_set_mapping(*newgroup, index, newrank);
+          newrank++;
+        }
+      }
+ }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
- MPI_Status status[])
-{
- return smpi_mpi_waitany(count, requests, index, status);
+int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group* newgroup) {
+ int retval, i, j, rank, size, index;
+
+ smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
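+    /* First pass counts the ranks selected by the (first, last, stride) triplets; second pass maps them in order */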
+ if(n == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+ } else {
+ size = 0;
+ for(i = 0; i < n; i++) {
+ for(rank = ranges[i][0]; /* First */
+ rank >= 0 && rank <= ranges[i][1]; /* Last */
+ rank += ranges[i][2] /* Stride */) {
+ size++;
+ }
+ }
+ if(size == smpi_group_size(group)) {
+ *newgroup = group;
+ } else {
+ *newgroup = smpi_group_new(size);
+ j = 0;
+ for(i = 0; i < n; i++) {
+ for(rank = ranges[i][0]; /* First */
+ rank >= 0 && rank <= ranges[i][1]; /* Last */
+ rank += ranges[i][2] /* Stride */) {
+ index = smpi_group_index(group, rank);
+ smpi_group_set_mapping(*newgroup, index, j);
+ j++;
+ }
+ }
+ }
+ }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-/**
- * MPI_Bcast
- **/
+int MPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group* newgroup) {
+ int retval, i, newrank, rank, size, index, add;
-/**
- * flat bcast
- **/
-int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
-int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
- MPI_Comm comm)
-{
- int rank;
- int retval = MPI_SUCCESS;
- smpi_mpi_request_t request;
-
- rank = smpi_mpi_comm_rank(comm);
- if (rank == root) {
- retval = smpi_create_request(buf, count, datatype, root,
- (root + 1) % comm->size, 0, comm, &request);
- request->forward = comm->size - 1;
- smpi_mpi_isend(request);
- } else {
- retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
- 0, comm, &request);
- smpi_mpi_irecv(request);
+ smpi_bench_end();
+ if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newgroup == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
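+    /* Keep every rank of group that is not covered by any (first, last, stride) triplet */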
+ if(n == 0) {
+ *newgroup = group;
+ } else {
+ size = smpi_group_size(group);
+ for(i = 0; i < n; i++) {
+ for(rank = ranges[i][0]; /* First */
+ rank >= 0 && rank <= ranges[i][1]; /* Last */
+ rank += ranges[i][2] /* Stride */) {
+ size--;
+ }
+ }
+ if(size == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+ } else {
+        *newgroup = smpi_group_new(size);
+        newrank = 0; /* next free slot in the new group */
+        for(rank = 0; rank < smpi_group_size(group); rank++) {
+          add = 1;
+          for(i = 0; i < n; i++) {
+            /* Is rank covered by the i-th triplet? (ascending stride, as assumed by the counting pass above) */
+            if(rank >= ranges[i][0] && rank <= ranges[i][1]
+               && (rank - ranges[i][0]) % ranges[i][2] == 0) {
+              add = 0;
+              break;
+            }
+          }
+          if(add == 1) {
+            index = smpi_group_index(group, rank);
+            smpi_group_set_mapping(*newgroup, index, newrank);
+            newrank++;
+          }
+        }
+ }
+ }
+ smpi_group_use(*newgroup);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- smpi_mpi_wait(request, MPI_STATUS_IGNORE);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
+int MPI_Comm_rank(MPI_Comm comm, int* rank) {
+ int retval;
- return(retval);
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+  } else if(rank == NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+ *rank = smpi_comm_rank(comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+int MPI_Comm_size(MPI_Comm comm, int* size) {
+ int retval;
+
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(size == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *size = smpi_comm_size(comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-/**
- * Bcast user entry point
- **/
-int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
- MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
+int MPI_Comm_group(MPI_Comm comm, MPI_Group* group) {
+ int retval;
smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(group == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *group = smpi_comm_group(comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- //retval = flat_tree_bcast(buf, count, datatype, root, comm);
- retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
+int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int* result) {
+ int retval;
+ smpi_bench_end();
+ if(comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(result == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ if(comm1 == comm2) { /* Same communicators means same groups */
+ *result = MPI_IDENT;
+ } else {
+ *result = smpi_group_compare(smpi_comm_group(comm1), smpi_comm_group(comm2));
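+      /* Distinct communicator handles with identical groups compare as MPI_CONGRUENT, not MPI_IDENT */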
+ if(*result == MPI_IDENT) {
+ *result = MPI_CONGRUENT;
+ }
+ }
+ retval = MPI_SUCCESS;
+ }
smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Comm_dup(MPI_Comm comm, MPI_Comm* newcomm) {
+ int retval;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(newcomm == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *newcomm = smpi_comm_new(smpi_comm_group(comm));
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
return retval;
}
+int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm* newcomm) {
+ int retval;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(group == MPI_GROUP_NULL) {
+ retval = MPI_ERR_GROUP;
+ } else if(newcomm == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *newcomm = smpi_comm_new(group);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
-#ifdef DEBUG_REDUCE
-/**
- * debugging helper function
- **/
-static void print_buffer_int(void *buf, int len, const char *msg, int rank)
-{
- int tmp, *v;
- printf("**[%d] %s: ", rank, msg);
- for (tmp = 0; tmp < len; tmp++) {
- v = buf;
- printf("[%d]", v[tmp]);
+int MPI_Comm_free(MPI_Comm* comm) {
+ int retval;
+
+ smpi_bench_end();
+ if(comm == NULL) {
+ retval = MPI_ERR_ARG;
+ } else if(*comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else {
+ smpi_comm_destroy(*comm);
+ *comm = MPI_COMM_NULL;
+ retval = MPI_SUCCESS;
}
- printf("\n");
- free(msg);
+ smpi_bench_begin();
+ return retval;
}
-#endif
-/**
- * MPI_Reduce
- **/
-int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
- MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- int rank;
- int size;
- int i;
- int tag = 0;
- smpi_mpi_request_t *requests;
- smpi_mpi_request_t request;
-
- smpi_bench_end();
-
- rank = smpi_mpi_comm_rank(comm);
- size = comm->size;
-
- if (rank != root) { // if i am not ROOT, simply send my buffer to root
-
-#ifdef DEBUG_REDUCE
- print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
-#endif
- retval =
- smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
- &request);
- smpi_mpi_isend(request);
- smpi_mpi_wait(request, MPI_STATUS_IGNORE);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
-
- } else {
- // i am the ROOT: wait for all buffers by creating one request by sender
- int src;
- requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
-
- void **tmpbufs = xbt_malloc((size-1) * sizeof(void *));
- for (i = 0; i < size-1; i++) {
- // we need 1 buffer per request to store intermediate receptions
- tmpbufs[i] = xbt_malloc(count * datatype->size);
- }
- // root: initiliaze recv buf with my own snd buf
- memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));
-
- // i can not use: 'request->forward = size-1;' (which would progagate size-1 receive reqs)
- // since we should op values as soon as one receiving request matches.
- for (i = 0; i < size-1; i++) {
- // reminder: for smpi_create_request() the src is always the process sending.
- src = i < root ? i : i + 1;
- retval = smpi_create_request(tmpbufs[i], count, datatype,
- src, root, tag, comm, &(requests[i]));
- if (NULL != requests[i] && MPI_SUCCESS == retval) {
- if (MPI_SUCCESS == retval) {
- smpi_mpi_irecv(requests[i]);
- }
- }
- }
- // now, wait for completion of all irecv's.
- for (i = 0; i < size-1; i++) {
- int index = MPI_UNDEFINED;
- smpi_mpi_waitany( size-1, requests, &index, MPI_STATUS_IGNORE);
-#ifdef DEBUG_REDUCE
- printf ("MPI_Waitany() unblocked: root received (completes req[index=%d])\n",index);
- print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
- rank);
-#endif
-
- // arg 2 is modified
- op->func(tmpbufs[index], recvbuf, &count, &datatype);
-#ifdef DEBUG_REDUCE
- print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
-#endif
- xbt_free(tmpbufs[index]);
- /* FIXME: with the following line, it generates an
- * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
- */
- // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
- }
- xbt_free(requests);
- xbt_free(tmpbufs);
- }
- smpi_bench_begin();
- return retval;
+int MPI_Irecv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request* request) {
+ int retval;
+
+ smpi_bench_end();
+ if(request == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-/**
- * MPI_Allreduce
- *
- * Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
- **/
-int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, MPI_Comm comm );
-int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, MPI_Comm comm )
-{
- int retval = MPI_SUCCESS;
- int root=1; // arbitrary choice
+int MPI_Isend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) {
+ int retval;
+
+ smpi_bench_end();
+ if(request == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+int MPI_Recv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status* status) {
smpi_bench_end();
+ smpi_mpi_recv(buf, count, datatype, src, tag, comm, status);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
+}
- retval = SMPI_MPI_Reduce( sendbuf, recvbuf, count, datatype, op, root, comm);
- if (MPI_SUCCESS != retval)
- return(retval);
+int MPI_Send(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
+ smpi_bench_end();
+ smpi_mpi_send(buf, count, datatype, dst, tag, comm);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
+}
- retval = SMPI_MPI_Bcast( sendbuf, count, datatype, root, comm);
+int MPI_Sendrecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void* recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status* status) {
+ smpi_bench_end();
+ smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm, status);
smpi_bench_begin();
- return( retval );
+ return MPI_SUCCESS;
}
+int MPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, int sendtag, int src, int recvtag, MPI_Comm comm, MPI_Status* status) {
+ //TODO: suboptimal implementation
+ void* recvbuf;
+ int retval, size;
-/**
- * MPI_Scatter user entry point
- **/
-//int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
-// void *recvbuf, int recvcount, MPI_Datatype recvtype,int root,
-// MPI_Comm comm);
-int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype,
- int root, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- int i;
- int cnt=0;
- int rank;
- int tag=0;
- char *cbuf; // to manipulate the void * buffers
- smpi_mpi_request_t *requests;
- smpi_mpi_request_t request;
- smpi_mpi_status_t status;
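+  /* Exchange through a temporary buffer, then copy the received data back over buf */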
+ size = smpi_datatype_size(datatype) * count;
+ recvbuf = xbt_new(char, size);
+ retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status);
+ memcpy(buf, recvbuf, size * sizeof(char));
+ xbt_free(recvbuf);
+ return retval;
+}
+int MPI_Test(MPI_Request* request, int* flag, MPI_Status* status) {
+ int retval;
smpi_bench_end();
+ if(request == NULL || flag == NULL) {
+ retval = MPI_ERR_ARG;
+ } else if(*request == MPI_REQUEST_NULL) {
+ retval = MPI_ERR_REQUEST;
+ } else {
+ *flag = smpi_mpi_test(request, status);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- rank = smpi_mpi_comm_rank(comm);
+int MPI_Testany(int count, MPI_Request requests[], int* index, int* flag, MPI_Status* status) {
+ int retval;
- requests = xbt_malloc((comm->size-1) * sizeof(smpi_mpi_request_t));
- if (rank == root) {
- // i am the root: distribute my sendbuf
- for (i=0; i < comm->size; i++) {
- cbuf = sendbuf;
- cbuf += i*sendcount*datatype->size;
- if ( i!=root ) { // send to processes ...
+ smpi_bench_end();
+ if(index == NULL || flag == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *flag = smpi_mpi_testany(count, requests, index, status);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- retval = smpi_create_request((void *)cbuf, sendcount,
- datatype, root, i, tag, comm, &(requests[cnt++]));
- if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
- if (MPI_SUCCESS == retval) {
- smpi_mpi_isend(requests[cnt]);
- }
- }
- cnt++;
- }
- else { // ... except if it's me.
- memcpy(recvbuf, (void *)cbuf, recvcount*recvtype->size*sizeof(char));
- }
- }
- for(i=0; i<cnt; i++) { // wait for send to complete
- /* FIXME: waitall() should be slightly better */
- smpi_mpi_wait(requests[i], &status);
- xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
+int MPI_Wait(MPI_Request* request, MPI_Status* status) {
+ int retval;
- }
- }
- else { // i am a non-root process: wait data from the root
- retval = smpi_create_request(recvbuf,recvcount,
- recvtype, root, rank, tag, comm, &request);
- if (NULL != request && MPI_SUCCESS == retval) {
- if (MPI_SUCCESS == retval) {
- smpi_mpi_irecv(request);
- }
- }
- smpi_mpi_wait(request, &status);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
+ smpi_bench_end();
+ if(request == NULL) {
+ retval = MPI_ERR_ARG;
+ } else if(*request == MPI_REQUEST_NULL) {
+ retval = MPI_ERR_REQUEST;
+ } else {
+ smpi_mpi_wait(request, status);
+ retval = MPI_SUCCESS;
}
- xbt_free(requests);
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Waitany(int count, MPI_Request requests[], int* index, MPI_Status* status) {
+ int retval;
+ smpi_bench_end();
+ if(index == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *index = smpi_mpi_waitany(count, requests, status);
+ retval = MPI_SUCCESS;
+ }
smpi_bench_begin();
+ return retval;
+}
+int MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) {
+ smpi_bench_end();
+ smpi_mpi_waitall(count, requests, status);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
+}
+
+int MPI_Waitsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[]) {
+ int retval;
+
+ smpi_bench_end();
+ if(outcount == NULL || indices == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ *outcount = smpi_mpi_waitsome(incount, requests, indices, status);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
return retval;
}
+int MPI_Bcast(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) {
+ int retval;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else {
+ smpi_mpi_bcast(buf, count, datatype, root, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+int MPI_Barrier(MPI_Comm comm) {
+ int retval;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else {
+ smpi_mpi_barrier(comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+int MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) {
+ int retval;
-// used by comm_split to sort ranks based on key values
-int smpi_compare_rankkeys(const void *a, const void *b);
-int smpi_compare_rankkeys(const void *a, const void *b)
-{
- int *x = (int *) a;
- int *y = (int *) b;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else {
+ smpi_mpi_gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- if (x[1] < y[1])
- return -1;
+int MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm) {
+ int retval;
- if (x[1] == y[1]) {
- if (x[0] < y[0])
- return -1;
- if (x[0] == y[0])
- return 0;
- return 1;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(recvcounts == NULL || displs == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ smpi_mpi_gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm);
+ retval = MPI_SUCCESS;
}
+ smpi_bench_begin();
+ return retval;
+}
+
+int MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) {
+ int retval;
- return 1;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else {
+ smpi_mpi_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
}
-int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
- MPI_Comm * comm_out)
-{
- int retval = MPI_SUCCESS;
+int MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype, MPI_Comm comm) {
+ int retval;
+
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(recvcounts == NULL || displs == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ smpi_mpi_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- int index, rank;
- smpi_mpi_request_t request;
- int colorkey[2];
- smpi_mpi_status_t status;
+int MPI_Scatter(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) {
+ int retval;
smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else {
+ smpi_mpi_scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- // FIXME: need to test parameters
+int MPI_Scatterv(void* sendbuf, int* sendcounts, int* displs, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) {
+ int retval;
- index = smpi_process_index();
- rank = comm->index_to_rank_map[index];
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(sendcounts == NULL || displs == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ smpi_mpi_scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- // default output
- comm_out = NULL;
+int MPI_Reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) {
+ int retval;
- // root node does most of the real work
- if (0 == rank) {
- int colormap[comm->size];
- int keymap[comm->size];
- int rankkeymap[comm->size * 2];
- int i, j;
- smpi_mpi_communicator_t tempcomm = NULL;
- int count;
- int indextmp;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(datatype == MPI_DATATYPE_NULL || op == MPI_OP_NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- colormap[0] = color;
- keymap[0] = key;
+int MPI_Allreduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) {
+ int retval;
- // FIXME: use scatter/gather or similar instead of individual comms
- for (i = 1; i < comm->size; i++) {
- retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
- rank, MPI_ANY_TAG, comm, &request);
- smpi_mpi_irecv(request);
- smpi_mpi_wait(request, &status);
- colormap[status.MPI_SOURCE] = colorkey[0];
- keymap[status.MPI_SOURCE] = colorkey[1];
- xbt_mallocator_release(smpi_global->request_mallocator, request);
- }
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(datatype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(op == MPI_OP_NULL) {
+ retval = MPI_ERR_OP;
+ } else {
+ smpi_mpi_allreduce(sendbuf, recvbuf, count, datatype, op, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
- for (i = 0; i < comm->size; i++) {
- if (MPI_UNDEFINED == colormap[i]) {
- continue;
- }
- // make a list of nodes with current color and sort by keys
- count = 0;
- for (j = i; j < comm->size; j++) {
- if (colormap[i] == colormap[j]) {
- colormap[j] = MPI_UNDEFINED;
- rankkeymap[count * 2] = j;
- rankkeymap[count * 2 + 1] = keymap[j];
- count++;
- }
- }
- qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);
-
- // new communicator
- tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
- tempcomm->barrier_count = 0;
- tempcomm->size = count;
- tempcomm->barrier_mutex = SIMIX_mutex_init();
- tempcomm->barrier_cond = SIMIX_cond_init();
- tempcomm->rank_to_index_map = xbt_new(int, count);
- tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
- for (j = 0; j < smpi_global->process_count; j++) {
- tempcomm->index_to_rank_map[j] = -1;
- }
- for (j = 0; j < count; j++) {
- indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
- tempcomm->rank_to_index_map[j] = indextmp;
- tempcomm->index_to_rank_map[indextmp] = j;
- }
- for (j = 0; j < count; j++) {
- if (rankkeymap[j * 2]) {
- retval = smpi_create_request(&j, 1, MPI_INT, 0,
- rankkeymap[j * 2], 0, comm, &request);
- request->data = tempcomm;
- smpi_mpi_isend(request);
- smpi_mpi_wait(request, &status);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
- } else {
- *comm_out = tempcomm;
- }
- }
- }
+int MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int* recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) {
+ int retval, i, rank, size, count;
+ int* displs;
+
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(datatype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(op == MPI_OP_NULL) {
+ retval = MPI_ERR_OP;
+ } else if(recvcounts == NULL) {
+ retval = MPI_ERR_ARG;
} else {
- colorkey[0] = color;
- colorkey[1] = key;
- retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
- &request);
- smpi_mpi_isend(request);
- smpi_mpi_wait(request, &status);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
- if (MPI_UNDEFINED != color) {
- retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
- &request);
- smpi_mpi_irecv(request);
- smpi_mpi_wait(request, &status);
- *comm_out = request->data;
+ /* arbitrarily choose root as rank 0 */
+ /* TODO: faster direct implementation ? */
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+    count = 0;
+    displs = xbt_new(int, size);
+    for(i = 0; i < size; i++) {
+      displs[i] = count; /* offset of process i's block within the reduced buffer */
+      count += recvcounts[i];
+    }
+ smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
+ smpi_mpi_scatterv(recvbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
+ xbt_free(displs);
+ retval = MPI_SUCCESS;
}
-
smpi_bench_begin();
-
return retval;
}
-double SMPI_MPI_Wtime(void)
-{
- return (SIMIX_get_clock());
+/**
+ * MPI_Alltoall user entry point
+ *
+ * Follows the algorithm-selection logic of Open MPI (as of 1.2.7 or greater),
+ * see ompi/mca/coll/tuned/coll_tuned_module.c
+ **/
+
+int MPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) {
+ int retval, size, sendsize;
+
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else {
+ size = smpi_comm_size(comm);
+ sendsize = smpi_datatype_size(sendtype) * sendcount;
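+    /* Pick the flavour from the message and communicator sizes: Bruck for small messages on larger
+       communicators, basic linear for medium messages, pairwise otherwise */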
+ if(sendsize < 200 && size > 12) {
+ retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+ } else if(sendsize < 3000) {
+ retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+ } else {
+ retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+ }
+ }
+ smpi_bench_begin();
+ return retval;
}
+int MPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype sendtype, void* recvbuf, int *recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm) {
+ int retval;
+ smpi_bench_end();
+ if(comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if(sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+ retval = MPI_ERR_TYPE;
+ } else if(sendcounts == NULL || senddisps == NULL || recvcounts == NULL || recvdisps == NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ retval = smpi_coll_basic_alltoallv(sendbuf, sendcounts, senddisps, sendtype, recvbuf, recvcounts, recvdisps, recvtype, comm);
+ }
+ smpi_bench_begin();
+ return retval;
+}