[SMPI] Ported smpi_deployment.cpp to C++
[simgrid.git] / src/smpi/smpi_deployment.cpp
index b1b0575..8988132 100644
@@ -1,27 +1,50 @@
-/* Copyright (c) 2004-2014. The SimGrid Team.
+/* Copyright (c) 2004-2017. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include "private.h"
-#include "xbt/sysdep.h"
-#include "xbt/synchro_core.h"
+#include "simgrid/msg.h" /* barrier */
+#include "simgrid/s4u.hpp"
+#include "src/smpi/SmpiHost.hpp"
 #include "xbt/log.h"
-#include "xbt/dict.h"
 
-static xbt_dict_t smpi_instances = NULL;
-extern int process_count;
-extern int* index_to_process_data;
+namespace simgrid {
+namespace smpi {
+namespace app {
+
+class Instance {
+public:
+  Instance(const char* name, int max_no_processes, int process_count, MPI_Comm comm, msg_bar_t finalization_barrier)
+      : name(name)
+      , size(max_no_processes)
+      , present_processes(0)
+      , index(process_count)
+      , comm_world(comm)
+      , finalization_barrier(finalization_barrier)
+  {
+  }
 
-typedef struct s_smpi_mpi_instance{
   const char* name;
   int size;
   int present_processes;
-  int index;
+  int index; // Badly named. This should be "no_processes_when_registering" ;)
   MPI_Comm comm_world;
-  xbt_bar_t finalization_barrier;
-} s_smpi_mpi_instance_t;
+  msg_bar_t finalization_barrier;
+};
+}
+}
+namespace s4u {
+extern std::map<std::string, simgrid::s4u::Host*> host_list;
+}
+}
+
+using simgrid::smpi::app::Instance;
+
+static std::map<std::string, Instance> smpi_instances;
+extern int process_count; // How many processes have been allocated over all instances?
+extern int* index_to_process_data;
 
 /** \ingroup smpi_simulation
  * \brief Registers a running instance of a MPI program.
@@ -35,60 +58,66 @@ void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_
 {
   SIMIX_function_register(name, code);
 
-  s_smpi_mpi_instance_t* instance = (s_smpi_mpi_instance_t*)xbt_malloc(sizeof(s_smpi_mpi_instance_t));
+  static int already_called = 0;
+  if (!already_called) {
+    already_called = 1;
+    for (auto& item : simgrid::s4u::host_list) {
+      simgrid::s4u::Host* host = item.second;
+      host->extension_set(new simgrid::smpi::SmpiHost(host));
+    }
+  }
 
-  instance->name = name;
-  instance->size = num_processes;
-  instance->present_processes = 0;
-  instance->index = process_count;
-  instance->comm_world = MPI_COMM_NULL;
-  instance->finalization_barrier=xbt_barrier_init(num_processes);
+  Instance instance(name, num_processes, process_count, MPI_COMM_NULL, MSG_barrier_init(num_processes));
 
   process_count+=num_processes;
 
-  if(!smpi_instances){
-    smpi_instances = xbt_dict_new_homogeneous(xbt_free_f);
-  }
-
-  xbt_dict_set(smpi_instances, name, (void*)instance, NULL);
-  return;
+  smpi_instances.insert(std::pair<std::string, Instance>(name, instance));
 }
 
 //get the index of the process in the process_data array
-void smpi_deployment_register_process(const char* instance_id, int rank, int index,MPI_Comm** comm, xbt_bar_t* bar){
-
-  if(!smpi_instances){//no instance registered, we probably used smpirun.
+void smpi_deployment_register_process(const char* instance_id, int rank, int index)
+{
+  if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
     index_to_process_data[index]=index;
-    *bar = NULL;
-    *comm = NULL;
     return;
   }
 
-  s_smpi_mpi_instance_t* instance =
-     static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
-  xbt_assert(instance, "Error, unknown instance %s", instance_id);
+  Instance& instance = smpi_instances.at(instance_id);
+
+  if (instance.comm_world == MPI_COMM_NULL) {
+    MPI_Group group     = new simgrid::smpi::Group(instance.size);
+    instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
+  }
+  instance.present_processes++;
+  index_to_process_data[index] = instance.index + rank;
+  instance.comm_world->group()->set_mapping(index, rank);
+}
+
+// Return a pointer to the registered instance's MPI_COMM_WORLD (nullptr when launched through smpirun)
+MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
+{
+  if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
+    return nullptr;
+  }
+  Instance& instance = smpi_instances.at(instance_id);
+  return &instance.comm_world;
+}
 
-  if(instance->comm_world == MPI_COMM_NULL){
-    MPI_Group group = smpi_group_new(instance->size);
-    instance->comm_world = smpi_comm_new(group, NULL);
+msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id)
+{
+  if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
+    return nullptr;
   }
-  instance->present_processes++;
-  index_to_process_data[index]=instance->index+rank;
-  smpi_group_set_mapping(smpi_comm_group(instance->comm_world), index, rank);
-  *bar = instance->finalization_barrier;
-  *comm = &instance->comm_world;
-  return;
+  Instance& instance = smpi_instances.at(instance_id);
+  return instance.finalization_barrier;
 }
 
 void smpi_deployment_cleanup_instances(){
-  xbt_dict_cursor_t cursor = NULL;
-  s_smpi_mpi_instance_t* instance = NULL;
-  char *name = NULL;
-  xbt_dict_foreach(smpi_instances, cursor, name, instance) {
-    if(instance->comm_world!=MPI_COMM_NULL)
-      while (smpi_group_unuse(smpi_comm_group(instance->comm_world)) > 0);
-    xbt_free(instance->comm_world);
-    xbt_barrier_destroy(instance->finalization_barrier);
+  for (auto& item : smpi_instances) {
+    Instance instance = item.second;
+    if (instance.comm_world != MPI_COMM_NULL)
+      delete instance.comm_world->group();
+    delete instance.comm_world;
+    MSG_barrier_destroy(instance.finalization_barrier);
   }
-  xbt_dict_free(&smpi_instances);
+  smpi_instances.clear(); // drop the now-dangling entries, mirroring the old xbt_dict_free()
 }
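
Below is a minimal usage sketch (not part of the commit) of how the public entry point touched here, SMPI_app_instance_register(), is typically driven from an MSG+SMPI simulation main(). The instance name "worker_app", the worker() function, the rank count and the platform/deployment file arguments are illustrative placeholders; the MSG_*/SMPI_* calls are the usual SimGrid entry points of that era. Inside such an instance, MPI_COMM_WORLD resolves to the per-instance communicator that smpi_deployment_register_process() builds and smpi_deployment_comm_world() exposes above.

#include "simgrid/msg.h"
#include "smpi/smpi.h"

/* Hypothetical per-instance main: each of the registered ranks runs this
 * function; MPI_COMM_WORLD here is the instance's own communicator. */
static int worker(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  /* ... per-rank work for this instance ... */
  MPI_Finalize();
  return 0;
}

int main(int argc, char* argv[])
{
  MSG_init(&argc, argv);
  SMPI_init();

  MSG_create_environment(argv[1]); /* platform file */

  /* Register one MPI instance named "worker_app" with 4 ranks (illustrative);
   * this is what fills the smpi_instances map above. */
  SMPI_app_instance_register("worker_app", worker, 4);

  MSG_launch_application(argv[2]); /* deployment file mapping "worker_app" onto hosts */
  MSG_main();

  SMPI_finalize();
  return 0;
}

Compiled with smpicc and run against matching platform and deployment files, this should launch the registered MPI ranks alongside any plain MSG processes.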