More template-based factorization
[simgrid.git] src/smpi/smpi_global.cpp
index bcc611c..c5b7734 100644
@@ -1,5 +1,4 @@
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved.                                                     */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved.          */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -9,14 +8,13 @@
 #include "private.hpp"
 #include "simgrid/s4u/Mailbox.hpp"
 #include "simgrid/sg_config.h"
-#include "smpi_mpi_dt_private.h"
 #include "src/kernel/activity/SynchroComm.hpp"
 #include "src/mc/mc_record.h"
 #include "src/mc/mc_replay.h"
 #include "src/msg/msg_private.h"
 #include "src/simix/smx_private.h"
 #include "surf/surf.h"
-#include "xbt/replay.h"
+#include "xbt/replay.hpp"
 
 #include <float.h> /* DBL_MAX */
 #include <fstream>
@@ -75,8 +73,6 @@ int process_count = 0;
 int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
-extern xbt_dict_t smpi_type_keyvals;
-extern xbt_dict_t smpi_comm_keyvals;
 xbt_os_timer_t global_timer;
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
@@ -215,7 +211,8 @@ bool smpi_process_get_replaying(){
   int index = smpi_process_index();
   if (index != MPI_UNDEFINED)
     return process_data[index_to_process_data[index]]->replaying;
-  else return (_xbt_replay_is_active() != 0);
+  else
+    return simgrid::xbt::replay_is_active();
 }
 
 int smpi_global_size()
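
The replay test above moves from the C helper `_xbt_replay_is_active()` (from the removed `xbt/replay.h`) to the namespaced C++ function `simgrid::xbt::replay_is_active()` declared in `xbt/replay.hpp`. A minimal caller-side sketch, using only the include and the call from this diff (the helper function is hypothetical):

    #include "xbt/replay.hpp" // C++ replacement for the old xbt/replay.h

    // Hypothetical helper: act only while a replay file is actually
    // driving the execution.
    static bool should_trace_now()
    {
      return simgrid::xbt::replay_is_active();
    }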
@@ -352,9 +349,9 @@ MPI_Comm smpi_process_comm_self()
 {
   smpi_process_data_t data = smpi_process_data();
   if(data->comm_self==MPI_COMM_NULL){
-    MPI_Group group = smpi_group_new(1);
-    data->comm_self = smpi_comm_new(group, nullptr);
-    smpi_group_set_mapping(group, smpi_process_index(), 0);
+    MPI_Group group = new Group(1);
+    data->comm_self = new Comm(group, nullptr);
+    group->set_mapping(smpi_process_index(), 0);
   }
 
   return data->comm_self;
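
This hunk shows the pattern the commit applies throughout: the C-style helpers `smpi_group_new()`, `smpi_comm_new()` and `smpi_group_set_mapping()` become the C++ classes `Group` and `Comm` with member functions. A condensed before/after sketch, built only from calls visible in this diff:

    // Before: C free functions operating on opaque handles
    MPI_Group group = smpi_group_new(1);
    MPI_Comm  self  = smpi_comm_new(group, nullptr);
    smpi_group_set_mapping(group, smpi_process_index(), 0);

    // After: the same objects as C++ classes with member functions
    MPI_Group group = new Group(1);
    MPI_Comm  self  = new Comm(group, nullptr);
    group->set_mapping(smpi_process_index(), 0);
    // ...with the matching teardown seen further down:
    Comm::destroy(self);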
@@ -384,12 +381,6 @@ int smpi_process_get_sampling()
   return data->sampling;
 }
 
-void print_request(const char *message, MPI_Request request)
-{
-  XBT_VERB("%s  request %p  [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
-       message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
-}
-
 void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
 {
   smpi_comm_copy_data_callback = callback;
@@ -604,13 +595,13 @@ void smpi_global_init()
   //if the process was launched through the smpirun script, we generate a global MPI_COMM_WORLD
   //if not, we leave it as MPI_COMM_NULL, and the comm world will be private to each MPI instance
   if(smpirun){
-    group = smpi_group_new(process_count);
-    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
+    group = new Group(process_count);
+    MPI_COMM_WORLD = new Comm(group, nullptr);
     MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
     msg_bar_t bar = MSG_barrier_init(process_count);
 
     for (i = 0; i < process_count; i++) {
-      smpi_group_set_mapping(group, i, i);
+      group->set_mapping(i, i);
       process_data[i]->finalization_barrier = bar;
     }
   }
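
`MPI_Attr_put()` above publishes the process count as the `MPI_UNIVERSE_SIZE` attribute of the freshly built `MPI_COMM_WORLD`. On the application side this is queried through the standard MPI attribute API; a minimal standalone example (per the MPI standard, the value returned is a pointer to the int):

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      void* val;
      int flag;
      MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &val, &flag);
      if (flag) /* set e.g. when the run was started through smpirun */
        printf("universe size: %d\n", *(int*)val);
      MPI_Finalize();
      return 0;
    }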
@@ -622,17 +613,17 @@ void smpi_global_destroy()
 
   smpi_bench_destroy();
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-      while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
+      delete MPI_COMM_WORLD->group();
       MSG_barrier_destroy(process_data[0]->finalization_barrier);
   }else{
       smpi_deployment_cleanup_instances();
   }
   for (int i = 0; i < count; i++) {
     if(process_data[i]->comm_self!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_self);
+      Comm::destroy(process_data[i]->comm_self);
     }
     if(process_data[i]->comm_intra!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_intra);
+      Comm::destroy(process_data[i]->comm_intra);
     }
     xbt_os_timer_free(process_data[i]->timer);
     xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
@@ -642,11 +633,11 @@ void smpi_global_destroy()
   process_data = nullptr;
 
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
-    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
-    if(smpi_coll_cleanup_callback!=nullptr)
-      smpi_coll_cleanup_callback();
-    xbt_free(MPI_COMM_WORLD);
+    MPI_COMM_WORLD->cleanup_smp();
+    MPI_COMM_WORLD->cleanup_attr<Comm>();
+    if(Colls::smpi_coll_cleanup_callback!=nullptr)
+      Colls::smpi_coll_cleanup_callback();
+    delete MPI_COMM_WORLD;
   }
 
   MPI_COMM_WORLD = MPI_COMM_NULL;
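
The `cleanup_attr<Comm>()` call above is where the commit's title is most visible: attribute cleanup that used to be written once per handle kind is factored into a single member template, instantiated here for `Comm`. A simplified, self-contained sketch of that pattern (an assumption about the shape, not SimGrid's actual implementation):

    #include <unordered_map>

    // One deleter registry per handle type T, so Comm, Datatype, Win...
    // can all share the same cleanup code.
    template <class T> struct Keyvals {
      static std::unordered_map<int, void (*)(void*)> deleters;
    };
    template <class T>
    std::unordered_map<int, void (*)(void*)> Keyvals<T>::deleters;

    struct Handle {
      std::unordered_map<int, void*> attributes; // keyval -> value

      template <class T> void cleanup_attr()
      {
        for (auto& kv : attributes) {
          auto it = Keyvals<T>::deleters.find(kv.first);
          if (it != Keyvals<T>::deleters.end() && it->second != nullptr)
            it->second(kv.second); // run the user-registered delete callback
        }
        attributes.clear();
      }
    };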
@@ -656,15 +647,13 @@ void smpi_global_destroy()
   }
 
   xbt_free(index_to_process_data);
-  if(smpi_type_keyvals!=nullptr) 
-    xbt_dict_free(&smpi_type_keyvals);
-  if(smpi_comm_keyvals!=nullptr) 
-    xbt_dict_free(&smpi_comm_keyvals);
   if(smpi_privatize_global_variables)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
 }
 
+extern "C" {
+
 #ifndef WIN32
 
 void __attribute__ ((weak)) user_main_()
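
The `extern "C" {` block is moved up so that it also covers the weak `user_main_` and `main` fallbacks. This matters because the file is C++: without C linkage the compiler would mangle these names, and a strong definition coming from the user's C or Fortran object files could no longer override the weak one. A minimal illustration of the mechanism (not SimGrid code):

    // With C linkage the symbol is plain "user_main_", so any strong
    // definition of user_main_ elsewhere in the link overrides this default.
    extern "C" void __attribute__((weak)) user_main_()
    {
      // default body, used only when the application provides no user_main_
    }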
@@ -695,7 +684,6 @@ int __attribute__ ((weak)) main(int argc, char **argv)
 
 #endif
 
-extern "C" {
 static void smpi_init_logs(){
 
   /* Connect log categories.  See xbt/log.c */
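
The hunk below registers log categories for the freshly split C++ classes (`smpi_datatype`, `smpi_memory`, `smpi_op`, `smpi_request`, `smpi_utils`) and drops the obsolete `smpi_mpi_dt`. For reference, such a category is declared at its definition site roughly as follows (a sketch using the xbt logging macros; the description string is an assumption):

    #include "xbt/log.h"

    // In the subsystem's own .cpp file:
    XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi,
                                    "Logging specific to SMPI datatypes");

    static void example()
    {
      XBT_VERB("duplicating a datatype"); // routed through smpi_datatype
    }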
@@ -708,70 +696,25 @@ static void smpi_init_logs(){
   XBT_LOG_CONNECT(smpi_coll);
   XBT_LOG_CONNECT(smpi_colls);
   XBT_LOG_CONNECT(smpi_comm);
+  XBT_LOG_CONNECT(smpi_datatype);
   XBT_LOG_CONNECT(smpi_dvfs);
   XBT_LOG_CONNECT(smpi_group);
   XBT_LOG_CONNECT(smpi_kernel);
   XBT_LOG_CONNECT(smpi_mpi);
-  XBT_LOG_CONNECT(smpi_mpi_dt);
+  XBT_LOG_CONNECT(smpi_memory);
+  XBT_LOG_CONNECT(smpi_op);
   XBT_LOG_CONNECT(smpi_pmpi);
+  XBT_LOG_CONNECT(smpi_request);
   XBT_LOG_CONNECT(smpi_replay);
   XBT_LOG_CONNECT(smpi_rma);
+  XBT_LOG_CONNECT(smpi_utils);
 }
 }
 
 static void smpi_init_options(){
-  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
-    mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
-        (mpi_coll_gather_description[gather_id].coll);
-
-    int allgather_id = find_coll_description(mpi_coll_allgather_description,
-                                             xbt_cfg_get_string("smpi/allgather"),"allgather");
-    mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
-        (mpi_coll_allgather_description[allgather_id].coll);
-
-    int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
-                                              xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
-    mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
-        (mpi_coll_allgatherv_description[allgatherv_id].coll);
-
-    int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
-                                             xbt_cfg_get_string("smpi/allreduce"),"allreduce");
-    mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
-        (mpi_coll_allreduce_description[allreduce_id].coll);
-
-    int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
-                                            xbt_cfg_get_string("smpi/alltoall"),"alltoall");
-    mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
-        (mpi_coll_alltoall_description[alltoall_id].coll);
-
-    int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
-                                             xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
-    mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
-        (mpi_coll_alltoallv_description[alltoallv_id].coll);
-
-    int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
-    mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
-        (mpi_coll_bcast_description[bcast_id].coll);
-
-    int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
-    mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
-        (mpi_coll_reduce_description[reduce_id].coll);
-
-    int reduce_scatter_id =
-        find_coll_description(mpi_coll_reduce_scatter_description,
-                              xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
-    mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts,MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
-        (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);
-
-    int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
-    mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
-        (mpi_coll_scatter_description[scatter_id].coll);
-
-    int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
-    mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
-        (mpi_coll_barrier_description[barrier_id].coll);
-
-    smpi_coll_cleanup_callback=nullptr;
+
+    Colls::set_collectives();
+    Colls::smpi_coll_cleanup_callback=nullptr;
     smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
     smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
     smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
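
Every removed block above followed the same three steps: read a configuration string such as `smpi/gather`, look it up in the matching `mpi_coll_*_description` table with `find_coll_description()`, and install the selected function pointer. `Colls::set_collectives()` centralizes that repetition; a self-contained sketch of the underlying table-driven idea (all names here are illustrative stand-ins, not SimGrid's actual tables):

    #include <cstdio>
    #include <cstring>

    typedef int (*coll_fn)();
    struct coll_description { const char* name; coll_fn fn; };

    static int gather_default()  { return 0; }
    static int gather_binomial() { return 0; }

    static coll_description gather_table[] = {
        {"default", gather_default}, {"binomial", gather_binomial}, {nullptr, nullptr}};

    // Generic lookup replacing the dozen copy-pasted selection blocks.
    static coll_fn select_coll(const coll_description* table, const char* wanted)
    {
      for (int i = 0; table[i].name != nullptr; i++)
        if (std::strcmp(table[i].name, wanted) == 0)
          return table[i].fn;
      std::fprintf(stderr, "unknown collective '%s', using default\n", wanted);
      return table[0].fn;
    }

    int main()
    {
      // In SMPI the string would come from the config,
      // e.g. xbt_cfg_get_string("smpi/gather").
      coll_fn gather = select_coll(gather_table, "binomial");
      return gather();
    }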