fix mismatched new[]/free
[simgrid.git] / src / smpi / internals / smpi_global.cpp
index 5fc43b3..6ea9625 100644
@@ -58,10 +58,23 @@ int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
 xbt_os_timer_t global_timer;
+/**
+ * MPI_COMM_WORLD is initialized to MPI_COMM_UNINITIALIZED (a sentinel
+ * variable) on purpose: the implementation of MPI_Comm checks whether
+ * "this == MPI_COMM_UNINITIALIZED" and, if so, uses
+ * smpi_process()->comm_world() instead of "this".
+ * This is how a single global variable can denote a different communicator
+ * in each process (namely, the one of its own SMPI instance).
+ *
+ * See smpi_comm.cpp and the functions therein for details.
+ */
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
 MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+// No instance gets created manually here; keep this default name in sync with
+// the smpirun.in script, which also uses it when generating the <actor> tag.
+static const char* smpi_default_instance_name = "smpirun";
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
 static simgrid::config::Flag<double> smpi_init_sleep(
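
The sentinel trick described in the new comment deserves a concrete illustration. Below is a minimal sketch of the dispatch, not the actual smpi_comm.cpp code: the size() method and the group_ member are illustrative, while smpi_process()->comm_world() is the redirection named in the comment.

    int simgrid::smpi::Comm::size()
    {
      if (this == MPI_COMM_UNINITIALIZED)            // the shared global sentinel?
        return smpi_process()->comm_world()->size(); // use this instance's world
      return group_->size();                         // hypothetical group member
    }

Because every method starts with this redirection, all ranks can share the one MPI_COMM_WORLD variable yet each observes the communicator of its own SMPI instance.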
@@ -233,15 +246,13 @@ int smpi_enabled() {
 
 void smpi_global_init()
 {
-  MPI_Group group;
-
   if (not MC_is_active()) {
     global_timer = xbt_os_timer_new();
     xbt_os_walltimer_start(global_timer);
   }
 
-  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
-    std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
+  std::string filename = xbt_cfg_get_string("smpi/comp-adjustment-file");
+  if (not filename.empty()) {
     std::ifstream fstream(filename);
     if (not fstream.is_open()) {
       xbt_die("Could not open file %s. Does it exist?", filename.c_str());
@@ -267,7 +278,7 @@ void smpi_global_init()
   // and the (computed) event_set.
   std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;
 
-  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
+  if (not xbt_cfg_get_string("smpi/papi-events").empty()) {
     if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
       XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
                 " Expected version is %i",
@@ -275,7 +286,7 @@ void smpi_global_init()
 
     typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
     boost::char_separator<char> separator_units(";");
-    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
+    std::string str = xbt_cfg_get_string("smpi/papi-events");
     Tokenizer tokens(str, separator_units);
 
     // Iterate over all the computational units. This could be processes, hosts, threads, ranks... You name it.
@@ -329,43 +340,38 @@ void smpi_global_init()
   }
 #endif
 
-  int smpirun = 0;
-  msg_bar_t finalization_barrier = nullptr;
-  if (process_count == 0){
-    process_count = SIMIX_process_count();
-    smpirun=1;
-    finalization_barrier = MSG_barrier_init(process_count);
+  if (index_to_process_data == nullptr) {
+    index_to_process_data = new int[SIMIX_process_count()];
+  }
+
+  bool smpirun = false;
+  if (process_count == 0) { // The program has been dispatched, but no SMPI
+                            // instance was registered: we were started by smpirun.
+    smpirun = true;
+    SMPI_app_instance_register(smpi_default_instance_name, nullptr,
+                               SIMIX_process_count()); // This call has a side effect on process_count...
+    MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
   }
   smpi_universe_size = process_count;
   process_data       = new simgrid::smpi::Process*[process_count];
   for (int i = 0; i < process_count; i++) {
-    process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
-  }
-  //if the process was launched through smpirun script we generate a global mpi_comm_world
-  //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
-  if (smpirun) {
-    group = new  simgrid::smpi::Group(process_count);
-    MPI_COMM_WORLD = new  simgrid::smpi::Comm(group, nullptr);
-    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
-
-    for (int i = 0; i < process_count; i++)
-      group->set_mapping(i, i);
+    if (smpirun) {
+      process_data[i] = new simgrid::smpi::Process(i, smpi_deployment_finalization_barrier(smpi_default_instance_name));
+      smpi_deployment_register_process(smpi_default_instance_name, i, i);
+    } else {
+      // TODO: we can pass nullptr here because Process::set_data() assigns the
+      // barrier from the instance anyway. This is ugly and should be changed.
+      process_data[i] = new simgrid::smpi::Process(i, nullptr);
+    }
   }
 }
 
 void smpi_global_destroy()
 {
-  int count = smpi_process_count();
-
   smpi_bench_destroy();
   smpi_shared_destroy();
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-      delete MPI_COMM_WORLD->group();
-      MSG_barrier_destroy(process_data[0]->finalization_barrier());
-  }else{
-      smpi_deployment_cleanup_instances();
-  }
-  for (int i = 0; i < count; i++) {
+  smpi_deployment_cleanup_instances();
+  for (int i = 0, count = smpi_process_count(); i < count; i++) {
     if(process_data[i]->comm_self()!=MPI_COMM_NULL){
       simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
     }
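
With this hunk, the smpirun launch path stops hand-building a global group and communicator and instead registers itself as a regular deployment instance under the default name, so both launch modes now share smpi_deployment_cleanup_instances() for teardown. For context, a hedged sketch of the explicit-instance side this unifies with, loosely patterned after SimGrid's multi-instance examples (the entry points and XML files are hypothetical, and the exact call order is approximate):

    static int master_mpi_main(int argc, char* argv[]);   // hypothetical MPI app
    static int alltoall_mpi_main(int argc, char* argv[]); // hypothetical MPI app

    int main(int argc, char* argv[])
    {
      MSG_init(&argc, argv);
      SMPI_init();
      MSG_create_environment("platform.xml");
      // Two MPI applications in one simulation, each seeing a private
      // MPI_COMM_WORLD; after this patch the smpirun case is just one more
      // instance, registered automatically under the name "smpirun".
      SMPI_app_instance_register("master_mpi",   master_mpi_main,   2);
      SMPI_app_instance_register("alltoall_mpi", alltoall_mpi_main, 4);
      MSG_launch_application("deployment.xml");
      MSG_main();
      SMPI_finalize();
      return 0;
    }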
@@ -379,13 +385,8 @@ void smpi_global_destroy()
   delete[] process_data;
   process_data = nullptr;
 
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    MPI_COMM_WORLD->cleanup_smp();
-    MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
-    if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
-      simgrid::smpi::Colls::smpi_coll_cleanup_callback();
-    delete MPI_COMM_WORLD;
-  }
+  if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
+    simgrid::smpi::Colls::smpi_coll_cleanup_callback();
 
   MPI_COMM_WORLD = MPI_COMM_NULL;
 
@@ -393,7 +394,7 @@ void smpi_global_destroy()
     xbt_os_timer_free(global_timer);
   }
 
-  xbt_free(index_to_process_data);
+  delete[] index_to_process_data;
   if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
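
This is the hunk behind the commit title: index_to_process_data is now allocated with new int[] in smpi_global_init(), so it must be released with delete[] rather than xbt_free() (essentially free()). Mixing allocator families is undefined behavior; a minimal standalone illustration:

    #include <cstdlib>

    int main()
    {
      int* a = new int[8]; // C++ array allocation
      delete[] a;          // correct: matches new[]
      // free(a);          // undefined behavior: free() does not match new[]
      // delete a;         // undefined behavior: scalar delete for an array

      void* b = std::malloc(8); // malloc-family allocation
      std::free(b);             // correct: matches malloc()
      return 0;
    }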
@@ -431,31 +432,24 @@ static void smpi_init_logs(){
 }
 
 static void smpi_init_options(){
-    //return if already called
-    if (smpi_cpu_threshold > -1)
-      return;
-    simgrid::smpi::Colls::set_collectives();
-    simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
-    smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
-    smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
-    const char* smpi_privatize_option               = xbt_cfg_get_string("smpi/privatization");
-    if (std::strcmp(smpi_privatize_option, "no") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
-    else if (std::strcmp(smpi_privatize_option, "yes") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
-    else if (std::strcmp(smpi_privatize_option, "mmap") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
-    else if (std::strcmp(smpi_privatize_option, "dlopen") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
-
-    // Some compatibility stuff:
-    else if (std::strcmp(smpi_privatize_option, "1") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
-    else if (std::strcmp(smpi_privatize_option, "0") == 0)
-      smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
-
-    else
-      xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option);
+  // return if already called
+  if (smpi_cpu_threshold > -1)
+    return;
+  simgrid::smpi::Colls::set_collectives();
+  simgrid::smpi::Colls::smpi_coll_cleanup_callback = nullptr;
+  smpi_cpu_threshold                               = xbt_cfg_get_double("smpi/cpu-threshold");
+  smpi_host_speed                                  = xbt_cfg_get_double("smpi/host-speed");
+  std::string smpi_privatize_option                = xbt_cfg_get_string("smpi/privatization");
+  if (smpi_privatize_option == "no" || smpi_privatize_option == "0")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+  else if (smpi_privatize_option == "yes" || smpi_privatize_option == "1")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+  else if (smpi_privatize_option == "mmap")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
+  else if (smpi_privatize_option == "dlopen")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
+  else
+    xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option.c_str());
 
 #if defined(__FreeBSD__)
     if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
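
A note on why the old code needed std::strcmp while the new one can use ==: comparing a const char* against a string literal compares pointer values, not contents, whereas std::string overloads operator== to compare character-wise. A short illustration:

    const char* c = "yes";
    bool by_pointer = (c == "yes");  // compares addresses: result unreliable
    std::string s = "yes";
    bool by_content = (s == "yes");  // compares contents: always true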
@@ -467,7 +461,7 @@ static void smpi_init_options(){
     if (smpi_cpu_threshold < 0)
       smpi_cpu_threshold = DBL_MAX;
 
-    char* val = xbt_cfg_get_string("smpi/shared-malloc");
+    // Bind the string to a named variable: if xbt_cfg_get_string() returns by
+    // value, calling c_str() on the temporary would leave 'val' dangling.
+    std::string shmalloc_opt = xbt_cfg_get_string("smpi/shared-malloc");
+    const char* val          = shmalloc_opt.c_str();
     if (not strcasecmp(val, "yes") || not strcmp(val, "1") || not strcasecmp(val, "on") ||
         not strcasecmp(val, "global")) {
       smpi_cfg_shared_malloc = shmalloc_global;
@@ -664,9 +658,8 @@ int smpi_main(const char* executable, int argc, char *argv[])
       "You may want to use sampling functions or trace replay to reduce this.");
     }
   }
-  int count = smpi_process_count();
   int ret   = 0;
-  for (int i = 0; i < count; i++) {
+  for (int i = 0, count = smpi_process_count(); i < count; i++) {
     if(process_data[i]->return_value()!=0){
       ret=process_data[i]->return_value();//return first non 0 value
       break;
@@ -688,7 +681,7 @@ void SMPI_init(){
   TRACE_smpi_alloc();
   simgrid::surf::surfExitCallbacks.connect(TRACE_smpi_release);
   if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
-    smpi_initialize_global_memory_segments();
+    smpi_backup_global_memory_segment();
 }
 
 void SMPI_finalize(){