Merge branch 'master' of git+ssh://scm.gforge.inria.fr//gitroot/simgrid/simgrid
[simgrid.git] / src / smpi / internals / smpi_global.cpp
index 1ccc8bf..351a95e 100644
@@ -3,21 +3,20 @@
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "mc/mc.h"
-#include "simgrid/s4u/Mailbox.hpp"
-#include "simgrid/s4u/Host.hpp"
-#include "src/msg/msg_private.h"
-#include "src/simix/smx_private.h"
-#include "src/surf/surf_interface.hpp"
 #include "SmpiHost.hpp"
-#include "xbt/config.hpp"
-#include "private.h"
+#include "mc/mc.h"
 #include "private.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "simgrid/s4u/Mailbox.hpp"
 #include "smpi_coll.hpp"
 #include "smpi_comm.hpp"
 #include "smpi_group.hpp"
 #include "smpi_info.hpp"
 #include "smpi_process.hpp"
+#include "src/msg/msg_private.hpp"
+#include "src/simix/smx_private.hpp"
+#include "src/surf/surf_interface.hpp"
+#include "xbt/config.hpp"
 
 #include <cfloat> /* DBL_MAX */
 #include <dlfcn.h>
@@ -59,10 +58,23 @@ int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
 xbt_os_timer_t global_timer;
+/**
+ * Setting MPI_COMM_WORLD to MPI_COMM_UNINITIALIZED (it's a variable)
+ * is important because the implementation of MPI_Comm checks
+ * whether "this == MPI_COMM_UNINITIALIZED"; if so, it uses
+ * smpi_process()->comm_world() instead of "this".
+ * This is how we get away with a single global variable even though each
+ * process has a different communicator (the one of its SMPI instance).
+ *
+ * See smpi_comm.cpp and the functions therein for details.
+ */
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
 MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+// Used when no instance is created manually. Check also the smpirun.in
+// script, which uses this default name as well (when generating the <actor> tags).
+static const char* smpi_default_instance_name = "smpirun";
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
 static simgrid::config::Flag<double> smpi_init_sleep(
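
To make the dispatch described in the new comment concrete, here is a minimal sketch (not part of this diff) of the pattern that MPI_Comm member functions in smpi_comm.cpp follow; the rank_ member is a hypothetical stand-in for however the real class resolves the value:

int Comm::rank()
{
  // The single global placeholder redirects to the calling process's
  // per-instance communicator instead of answering itself.
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->rank();
  return rank_; // hypothetical member; the real code resolves it via the group
}
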
@@ -173,24 +185,23 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b
   auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
   check_blocks(private_blocks, buff_size);
   void* tmpbuff=buff;
-  if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_start_data_exe)
-      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
-    ){
-       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-       smpi_switch_data_segment(
-           static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
-               ->index());
-       tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
-       memcpy_private(tmpbuff, buff, private_blocks);
+  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_data_exe_start) &&
+      (static_cast<char*>(buff) < smpi_data_exe_start + smpi_data_exe_size)) {
+    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
+
+    smpi_switch_data_segment(
+        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
+            ->index());
+    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
+    memcpy_private(tmpbuff, buff, private_blocks);
   }
 
-  if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_start_data_exe)
-      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
-       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-       smpi_switch_data_segment(
-           static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
-               ->index());
+  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_data_exe_start) &&
+      ((char*)comm->dst_buff < smpi_data_exe_start + smpi_data_exe_size)) {
+    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
+    smpi_switch_data_segment(
+        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
+            ->index());
   }
   XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
   memcpy_private(comm->dst_buff, tmpbuff, private_blocks);
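
Both branches above repeat the same range test: a pointer lies in the privatized global data segment iff it falls within [smpi_data_exe_start, smpi_data_exe_start + smpi_data_exe_size). A possible follow-up factoring, sketched here with a hypothetical helper name (not part of this diff):

static bool smpi_is_in_global_data(const void* p)
{
  // True when p points into the executable's global data segment, i.e. the
  // memory region that SMPI privatizes per process under MMAP mode.
  const char* c = static_cast<const char*>(p);
  return smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP &&
         c >= smpi_data_exe_start && c < smpi_data_exe_start + smpi_data_exe_size;
}
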
@@ -235,8 +246,6 @@ int smpi_enabled() {
 
 void smpi_global_init()
 {
-  MPI_Group group;
-
   if (not MC_is_active()) {
     global_timer = xbt_os_timer_new();
     xbt_os_walltimer_start(global_timer);
@@ -331,43 +340,38 @@ void smpi_global_init()
   }
 #endif
 
-  int smpirun = 0;
-  msg_bar_t finalization_barrier = nullptr;
-  if (process_count == 0){
-    process_count = SIMIX_process_count();
-    smpirun=1;
-    finalization_barrier = MSG_barrier_init(process_count);
+  if (index_to_process_data == nullptr) {
+    index_to_process_data = new int[SIMIX_process_count()];
+  }
+
+  bool smpirun = false;
+  if (process_count == 0) { // Processes have been deployed, but no SMPI instance
+                            // has been registered: the program was started via smpirun.
+    smpirun = true;
+    SMPI_app_instance_register(smpi_default_instance_name, nullptr,
+                               SIMIX_process_count()); // This call has a side effect on process_count...
+    MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
   }
   smpi_universe_size = process_count;
   process_data       = new simgrid::smpi::Process*[process_count];
   for (int i = 0; i < process_count; i++) {
-    process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
-  }
-  //if the process was launched through smpirun script we generate a global mpi_comm_world
-  //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
-  if (smpirun) {
-    group = new  simgrid::smpi::Group(process_count);
-    MPI_COMM_WORLD = new  simgrid::smpi::Comm(group, nullptr);
-    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
-
-    for (int i = 0; i < process_count; i++)
-      group->set_mapping(i, i);
+    if (smpirun) {
+      process_data[i] = new simgrid::smpi::Process(i, smpi_deployment_finalization_barrier(smpi_default_instance_name));
+      smpi_deployment_register_process(smpi_default_instance_name, i, i);
+    } else {
+      // TODO: We can pass nullptr here because Process::set_data() assigns the
+      // barrier from the instance anyway. This is ugly and should be changed.
+      process_data[i] = new simgrid::smpi::Process(i, nullptr);
+    }
   }
 }
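
For context on the default-instance path above: SMPI_app_instance_register is the same entry point a simulation uses to declare MPI instances by hand. A sketch of a manual registration follows; the instance name, main function, and process count are illustrative, not part of this diff:

static int alltoall_main(int argc, char* argv[])
{
  /* ... ordinary MPI code: MPI_Init(), communication, MPI_Finalize() ... */
  return 0;
}

/* Illustrative only: declare an instance named "alltoall" spanning 4 processes.
 * When nothing was registered by hand, smpi_global_init() above does the
 * equivalent with smpi_default_instance_name and a nullptr main function. */
SMPI_app_instance_register("alltoall", alltoall_main, 4);
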
 
 void smpi_global_destroy()
 {
-  int count = smpi_process_count();
-
   smpi_bench_destroy();
   smpi_shared_destroy();
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-      delete MPI_COMM_WORLD->group();
-      MSG_barrier_destroy(process_data[0]->finalization_barrier());
-  }else{
-      smpi_deployment_cleanup_instances();
-  }
-  for (int i = 0; i < count; i++) {
+  smpi_deployment_cleanup_instances();
+  for (int i = 0, count = smpi_process_count(); i < count; i++) {
     if(process_data[i]->comm_self()!=MPI_COMM_NULL){
       simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
     }
@@ -381,13 +385,8 @@ void smpi_global_destroy()
   delete[] process_data;
   process_data = nullptr;
 
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    MPI_COMM_WORLD->cleanup_smp();
-    MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
-    if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
-      simgrid::smpi::Colls::smpi_coll_cleanup_callback();
-    delete MPI_COMM_WORLD;
-  }
+  if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
+    simgrid::smpi::Colls::smpi_coll_cleanup_callback();
 
   MPI_COMM_WORLD = MPI_COMM_NULL;
 
@@ -666,9 +665,8 @@ int smpi_main(const char* executable, int argc, char *argv[])
       "You may want to use sampling functions or trace replay to reduce this.");
     }
   }
-  int count = smpi_process_count();
   int ret   = 0;
-  for (int i = 0; i < count; i++) {
+  for (int i = 0, count = smpi_process_count(); i < count; i++) {
     if(process_data[i]->return_value()!=0){
       ret=process_data[i]->return_value();//return first non 0 value
       break;
@@ -690,7 +688,7 @@ void SMPI_init(){
   TRACE_smpi_alloc();
   simgrid::surf::surfExitCallbacks.connect(TRACE_smpi_release);
   if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
-    smpi_initialize_global_memory_segments();
+    smpi_backup_global_memory_segment();
 }
 
 void SMPI_finalize(){