AND - Algorithmique Numérique Distribuée

Public GIT Repository
Added MPI_COMM_SELF.
author    pini <pini@48e7efb5-ca39-0410-a469-dd3cf9ba447f>
Thu, 25 Mar 2010 14:41:39 +0000 (14:41 +0000)
committer pini <pini@48e7efb5-ca39-0410-a469-dd3cf9ba447f>
Thu, 25 Mar 2010 14:41:39 +0000 (14:41 +0000)
git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/simgrid/simgrid/trunk@7377 48e7efb5-ca39-0410-a469-dd3cf9ba447f

include/smpi/smpi.h
src/smpi/smpi_global.c

diff --git a/include/smpi/smpi.h b/include/smpi/smpi.h
index 7ce1977..a4a815a 100644
--- a/include/smpi/smpi.h
+++ b/include/smpi/smpi.h
@@ -134,6 +134,7 @@ typedef struct s_smpi_mpi_communicator* MPI_Comm;
 
 #define MPI_COMM_NULL NULL
 extern MPI_Comm MPI_COMM_WORLD;
+#define MPI_COMM_SELF smpi_process_comm_self()
 
 struct s_smpi_mpi_request;
 typedef struct s_smpi_mpi_request* MPI_Request;
@@ -216,6 +217,7 @@ XBT_PUBLIC(int) MPI_Comm_split(MPI_Comm comm, int color, int key,
 */
 // smpi functions
 XBT_IMPORT_NO_EXPORT(int) smpi_simulated_main(int argc, char** argv);
+XBT_PUBLIC(MPI_Comm) smpi_process_comm_self(void);
 /*
 XBT_PUBLIC(unsigned int) smpi_sleep(unsigned int);
 XBT_PUBLIC(void) smpi_exit(int);
diff --git a/src/smpi/smpi_global.c b/src/smpi/smpi_global.c
index 36ecab6..2eaabc4 100644
--- a/src/smpi/smpi_global.c
+++ b/src/smpi/smpi_global.c
@@ -13,6 +13,7 @@ typedef struct s_smpi_process_data {
   xbt_fifo_t pending_sent;
   xbt_fifo_t pending_recv;
   xbt_os_timer_t timer;
+  MPI_Comm comm_self;
 } s_smpi_process_data_t;
 
 static smpi_process_data_t* process_data = NULL;
@@ -44,6 +45,12 @@ xbt_os_timer_t smpi_process_timer(void) {
   return data->timer;
 }
 
+MPI_Comm smpi_process_comm_self(void) {
+  smpi_process_data_t data = smpi_process_data();
+
+  return data->comm_self;
+}
+
 void smpi_process_post_send(MPI_Comm comm, MPI_Request request) {
   int index = smpi_group_index(smpi_comm_group(comm), request->dst);
   smpi_process_data_t data = smpi_process_remote_data(index);
@@ -116,6 +123,9 @@ void smpi_global_init(void) {
     process_data[i]->pending_sent = xbt_fifo_new();
     process_data[i]->pending_recv = xbt_fifo_new();
     process_data[i]->timer = xbt_os_timer_new();
+    group = smpi_group_new(1);
+    process_data[i]->comm_self = smpi_comm_new(group);
+    smpi_group_set_mapping(group, i, 0);
   }
   group = smpi_group_new(process_count);
   MPI_COMM_WORLD = smpi_comm_new(group);
@@ -131,6 +141,7 @@ void smpi_global_destroy(void) {
   smpi_comm_destroy(MPI_COMM_WORLD);
   MPI_COMM_WORLD = MPI_COMM_NULL;
   for(i = 0; i < count; i++) {
+    smpi_comm_destroy(process_data[i]->comm_self);
     xbt_os_timer_free(process_data[i]->timer);
     xbt_fifo_free(process_data[i]->pending_recv);
     xbt_fifo_free(process_data[i]->pending_sent);
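
For context, a minimal user-level sketch of what the new symbol enables under SMPI (a hypothetical test program, not part of this commit): each simulated process should see itself as rank 0 of a size-1 communicator in MPI_COMM_SELF, while MPI_COMM_WORLD still spans all processes.

/* Hypothetical test program (not part of this commit): every rank is
 * expected to report rank 0 of size 1 in MPI_COMM_SELF. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char** argv) {
  int world_rank, self_rank, self_size;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_rank(MPI_COMM_SELF, &self_rank);
  MPI_Comm_size(MPI_COMM_SELF, &self_size);
  printf("world rank %d: MPI_COMM_SELF rank %d of %d\n",
         world_rank, self_rank, self_size);
  MPI_Finalize();
  return 0;
}

Design note: unlike MPI_COMM_WORLD, which is a single extern variable shared by the whole simulation, MPI_COMM_SELF must differ for each simulated process, so the header maps it to the smpi_process_comm_self() accessor rather than to another global; the accessor returns the per-process comm_self created in smpi_global_init() and destroyed in smpi_global_destroy().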