/**
 * MPI_Init: per-process SMPI initialization, then enter benchmarking mode.
 * Always returns MPI_SUCCESS.
 * (Resolved unmerged diff residue: the removed '-' line called the old
 * smpi_mpi_init() API; the replacement smpi_process_init() is kept.)
 */
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  smpi_bench_begin();
  return MPI_SUCCESS;
}
/**
 * MPI_Finalize: leave benchmarking mode, then tear down per-process SMPI
 * state. Always returns MPI_SUCCESS.
 * (Resolved unmerged diff residue: smpi_mpi_finalize() -> smpi_process_finalize().)
 */
int SMPI_MPI_Finalize()
{
  smpi_bench_end();
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node, should send abort signal to all
-// hosts in the communicator;
+// hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
{
smpi_exit(errorcode);
return smpi_mpi_wait(*request, status);
}
+int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) {
+ return smpi_mpi_waitall(count, requests,status);
+}
+int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status status[]) {
+ return smpi_mpi_waitany(count, requests, index,status);
+}
/**
 * MPI_Bcast
 **/
/* NOTE(review): this hunk shows only the signature and final return of
 * SMPI_MPI_Bcast -- the body (including the declaration and computation of
 * `retval`) was elided by the diff. Do not treat this span as compilable;
 * recover the full body from the original file before editing. */
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
MPI_Comm comm)
{
/* `retval` is declared/assigned in lines not visible in this fragment */
return retval;
}
+/**
+ * MPI_Reduce
+ **/
+
+int SMPI_MPI_Reduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
+ MPI_Op op, int root, MPI_Comm comm )
+{
+ int retval = MPI_SUCCESS;
+ int rank;
+ int size;
+ int i;
+ smpi_mpi_request_t *tabrequest;
+
+ smpi_bench_end();
+
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+
+ tabrequest = xbt_malloc((size)*sizeof(smpi_mpi_request_t));
+
+ if (rank != root) { // if i am not root, simply send my buffer to root
+ retval = smpi_create_request(sendbuf, count, datatype,
+ rank, root, 0, comm, &(tabrequest[rank]));
+ smpi_mpi_isend(tabrequest[rank]);
+ smpi_mpi_wait(tabrequest[rank], MPI_STATUS_IGNORE);
+ //printf("DEBUG: rank %d sent my sendbuf to root (rank %d)\n",rank,root);
+ } else {
+ // i am the root: wait for all buffers by creating requests
+ // i can not use: 'request->forward = size-1;' (which would progagate size-1 receive reqs)
+ // since we should op values as soon as one receiving request matches.
+ for (i=0; i<comm->size; i++) {
+ if ( rank != i ) { // except for me
+ // reminder: for smpi_create_request() the src is always the process sending.
+ retval = smpi_create_request(recvbuf, count, datatype, MPI_ANY_SOURCE, root,
+ 0, comm, &(tabrequest[i]));
+ if (NULL != tabrequest[i] && MPI_SUCCESS == retval) {
+ if (MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(tabrequest[i]);
+ }
+ }
+ }
+ }
+ // now, wait for completion of all irecv's.
+ // FIXME: we should implement smpi_wait_all for a more asynchronous behavior
+ for (i=0; i<comm->size; i++) {
+ if ( rank != i ) { // except for me
+ smpi_mpi_wait(tabrequest[i], MPI_STATUS_IGNORE);
+
+ // FIXME: the core part is here. To be written ...
+
+ fprintf(stderr,"[smpi] %s:%d : MPI_Reduce *Not yet implemented*.\n",__FILE__,__LINE__);
+ }
+ }
+
+ }
+ for (i=0; i<comm->size; i++)
+ xbt_mallocator_release(smpi_global->request_mallocator, tabrequest[i]);
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
/* NOTE(review): the remainder of this hunk is a diff fragment -- the bodies
 * of smpi_compare_rankkeys and the enclosing comm_split routine are elided,
 * and the '-'/'+' lines below are unresolved patch residue renaming
 * smpi_host_index() -> smpi_process_index() and smpi_global->host_count ->
 * smpi_global->process_count. Resolve against the full original file; do not
 * compile this span as-is. */
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
// FIXME: need to test parameters
- index = smpi_host_index();
+ index = smpi_process_index();
rank = comm->index_to_rank_map[index];
// default output
tempcomm->barrier_mutex = SIMIX_mutex_init();
tempcomm->barrier_cond = SIMIX_cond_init();
tempcomm->rank_to_index_map = xbt_new(int, count);
- tempcomm->index_to_rank_map = xbt_new(int, smpi_global->host_count);
- for (j = 0; j < smpi_global->host_count; j++) {
+ tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
+ for (j = 0; j < smpi_global->process_count; j++) {
tempcomm->index_to_rank_map[j] = -1;
}
for (j = 0; j < count; j++) {
return retval;
}
/**
 * MPI_Wtime: wall-clock time in seconds, taken from the simulator clock
 * (SIMIX_get_clock()), so it reflects simulated -- not host -- time.
 * (Stripped '+' unified-diff residue that made the file uncompilable.)
 */
double SMPI_MPI_Wtime(void)
{
  return SIMIX_get_clock();
}