Remove warnings.
[simgrid.git] src/smpi/smpi_global.cpp
index 6878d5d..c29f0ca 100644
@@ -7,6 +7,7 @@
 #include "private.h"
 #include "private.hpp"
 #include "simgrid/s4u/Mailbox.hpp"
+#include "smpi/smpi_shared_malloc.hpp"
 #include "simgrid/sg_config.h"
 #include "src/kernel/activity/SynchroComm.hpp"
 #include "src/mc/mc_record.h"
@@ -42,7 +43,7 @@ struct papi_process_data {
 #endif
 std::unordered_map<std::string, double> location2speedup;
 
-Process **process_data = nullptr;
+simgrid::smpi::Process **process_data = nullptr;
 int process_count = 0;
 int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
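The Process/Group/Comm renames in this hunk and the ones below are mechanical: the SMPI classes now live in the simgrid::smpi namespace, so file-scope uses must spell the qualified name. A minimal sketch of the assumed layout:

    namespace simgrid {
    namespace smpi {
    class Process { /* per-rank MPI state */ };
    }
    }

    // File-scope globals must now qualify the type:
    simgrid::smpi::Process** process_data = nullptr;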
@@ -66,13 +67,13 @@ int smpi_process_count()
   return process_count;
 }
 
-Process* smpi_process()
+simgrid::smpi::Process* smpi_process()
 {
   simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
-  return static_cast<Process*>(msgExt->data);
+  return static_cast<simgrid::smpi::Process*>(msgExt->data);
 }
 
-Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(int index)
 {
   return process_data[index_to_process_data[index]];
 }
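smpi_process() recovers the per-rank object by chasing two untyped extension pointers: the SIMIX actor's data field holds a MsgActorExt, whose data field holds the Process. A standalone analogue of that pattern, with illustrative types standing in for the SimGrid ones:

    #include <cassert>

    struct Process { int index; };
    struct MsgActorExt { void* data; }; // actor-level extension slot
    struct Actor { void* data; };       // kernel-level actor

    static Process* process_of(Actor* self) {
      auto* ext = static_cast<MsgActorExt*>(self->data);
      return static_cast<Process*>(ext->data);
    }

    int main() {
      Process p{42};
      MsgActorExt ext{&p};
      Actor actor{&ext};
      assert(process_of(&actor)->index == 42);
      return 0;
    }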
@@ -82,7 +83,7 @@ MPI_Comm smpi_process_comm_self(){
 }
 
 void smpi_process_init(int *argc, char ***argv){
-  Process::init(argc, argv);
+  simgrid::smpi::Process::init(argc, argv);
 }
 
 int smpi_process_index(){
@@ -103,31 +104,88 @@ void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, s
   smpi_comm_copy_data_callback = callback;
 }
 
+// Only used by the debug traces commented out below.
+static XBT_ATTRIB_UNUSED void print(const std::vector<std::pair<size_t, size_t>>& vec) {
+    fprintf(stderr, "{");
+    for (auto const& elt : vec) {
+        fprintf(stderr, "(0x%zx, 0x%zx),", elt.first, elt.second);
+    }
+    fprintf(stderr, "}\n");
+}
+// Copy only the private [begin, end) ranges; bytes outside them are shared
+// between ranks and need not be transferred.
+static void memcpy_private(void* dest, const void* src, const std::vector<std::pair<size_t, size_t>>& private_blocks) {
+  for (auto const& block : private_blocks) {
+    memcpy(static_cast<uint8_t*>(dest) + block.first, static_cast<const uint8_t*>(src) + block.first,
+           block.second - block.first);
+  }
+}
+
+static void check_blocks(const std::vector<std::pair<size_t, size_t>>& private_blocks, size_t buff_size) {
+  for (auto const& block : private_blocks) {
+    xbt_assert(block.first <= block.second && block.second <= buff_size, "Oops, bug in shared malloc.");
+  }
+}
+
 void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
 {
+  simgrid::kernel::activity::Comm* comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
+  size_t src_offset = 0, dst_offset = 0;
+  std::vector<std::pair<size_t, size_t>> src_private_blocks;
+  std::vector<std::pair<size_t, size_t>> dst_private_blocks;
   XBT_DEBUG("Copy the data over");
+  if (smpi_is_shared(buff, src_private_blocks, &src_offset)) {
+    XBT_DEBUG("Sender %p is shared. Ignoring its shared parts.", buff);
+    src_private_blocks = shift_and_frame_private_blocks(src_private_blocks, src_offset, buff_size);
+  }
+  else {
+    // Non-shared sender: the whole buffer is private, copy everything.
+    src_private_blocks.clear();
+    src_private_blocks.push_back(std::make_pair(0, buff_size));
+  }
+  if (smpi_is_shared((char*)comm->dst_buff, dst_private_blocks, &dst_offset)) {
+    XBT_DEBUG("Receiver %p is shared. Ignoring its shared parts.", (char*)comm->dst_buff);
+    dst_private_blocks = shift_and_frame_private_blocks(dst_private_blocks, dst_offset, buff_size);
+  }
+  else {
+    // Non-shared receiver: the whole buffer is private, copy everything.
+    dst_private_blocks.clear();
+    dst_private_blocks.push_back(std::make_pair(0, buff_size));
+  }
+/*
+  fprintf(stderr, "size: 0x%zx\n", buff_size);
+  fprintf(stderr, "src: ");
+  print(src_private_blocks);
+  fprintf(stderr, "src_offset = 0x%zx\n", src_offset);
+  fprintf(stderr, "dst: ");
+  print(dst_private_blocks);
+  fprintf(stderr, "dst_offset = 0x%zx\n", dst_offset);
+*/
+  check_blocks(src_private_blocks, buff_size);
+  check_blocks(dst_private_blocks, buff_size);
+  auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
+/*
+  fprintf(stderr, "Private blocks: ");
+  print(private_blocks);
+*/
+  check_blocks(private_blocks, buff_size);
   void* tmpbuff=buff;
-  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
-
   if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
       && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
     ){
        XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
 
        smpi_switch_data_segment(
-           (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
+           (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
        tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
-       memcpy(tmpbuff, buff, buff_size);
+       memcpy_private(tmpbuff, buff, private_blocks);
   }
 
   if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
       && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
        XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
        smpi_switch_data_segment(
-           (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
+           (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
   }
 
-  memcpy(comm->dst_buff, tmpbuff, buff_size);
+  XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
+  memcpy_private(comm->dst_buff, tmpbuff, buff_size, private_blocks);
+
   if (comm->detached) {
     // if this is a detached send, the source buffer was duplicated by SMPI
     // sender to make the original buffer available to the application ASAP
@@ -136,8 +194,8 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b
     //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
     comm->src_buff = nullptr;
   }
-
   if(tmpbuff!=buff)xbt_free(tmpbuff);
+
 }
 
 void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
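This rewritten copy path is the heart of the change. SMPI's shared mallocs back most of a buffer with pages common to all ranks, so only the per-rank private [begin, end) ranges carry meaningful data and need copying. A buffer that is not shared contributes a single block covering everything, which acts as an identity element for an intersection; that fallback strongly suggests merge_private_blocks intersects the two interval lists, although its body is not part of this diff. A self-contained sketch of that inferred mechanism (illustrative code, not the SimGrid implementation):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <utility>
    #include <vector>

    using Blocks = std::vector<std::pair<size_t, size_t>>; // [begin, end) offsets

    // Intersect two sorted, disjoint interval lists: a byte needs copying
    // only if it is private on BOTH sides.
    static Blocks intersect(const Blocks& a, const Blocks& b) {
      Blocks out;
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        size_t lo = std::max(a[i].first, b[j].first);
        size_t hi = std::min(a[i].second, b[j].second);
        if (lo < hi)
          out.emplace_back(lo, hi);
        if (a[i].second < b[j].second) ++i; else ++j;
      }
      return out;
    }

    // Selective memcpy over the private ranges only.
    static void copy_blocks(void* dst, const void* src, const Blocks& blocks) {
      for (auto const& blk : blocks)
        memcpy(static_cast<uint8_t*>(dst) + blk.first,
               static_cast<const uint8_t*>(src) + blk.first,
               blk.second - blk.first);
    }

    int main() {
      char src[16] = "ABCDEFGHIJKLMNO";
      char dst[16] = "...............";
      Blocks sender{{0, 4}, {8, 12}}; // sender's private ranges
      Blocks receiver{{2, 10}};       // receiver's private ranges
      copy_blocks(dst, src, intersect(sender, receiver)); // copies [2,4) and [8,10)
      printf("%s\n", dst);            // "..CD....IJ....."
      return 0;
    }

The privatization branch keeps its former shape: when the source lies in the privatized global data segment, the data is saved to tmpbuff while the sender's segment is mapped, the segment is switched to the receiver's, and only then does the (now selective) final copy run.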
@@ -272,15 +330,15 @@ void smpi_global_init()
     smpirun=1;
   }
   smpi_universe_size = process_count;
-  process_data       = new Process*[process_count];
+  process_data       = new simgrid::smpi::Process*[process_count];
   for (i = 0; i < process_count; i++) {
-    process_data[i]                       = new Process(i);
+    process_data[i]                       = new simgrid::smpi::Process(i);
   }
   //if the process was launched through smpirun script we generate a global mpi_comm_world
   //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
   if(smpirun){
-    group = new  Group(process_count);
-    MPI_COMM_WORLD = new  Comm(group, nullptr);
+    group = new simgrid::smpi::Group(process_count);
+    MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
     MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
     msg_bar_t bar = MSG_barrier_init(process_count);
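smpi_global_init fills a plain array of owning raw pointers, released member by member in smpi_global_destroy below. Purely as a point of comparison, the same ownership can be written with standard containers; a hedged sketch, not a claim that SimGrid's C-facing layout permits it:

    #include <memory>
    #include <vector>

    struct Rank { explicit Rank(int i) : index(i) {} int index; };

    std::vector<std::unique_ptr<Rank>> make_ranks(int n) {
      std::vector<std::unique_ptr<Rank>> ranks;
      for (int i = 0; i < n; i++)
        ranks.push_back(std::make_unique<Rank>(i));
      return ranks; // destruction frees every rank, no manual loop needed
    }

    int main() { return make_ranks(4).size() == 4 ? 0 : 1; }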
 
@@ -296,6 +354,7 @@ void smpi_global_destroy()
   int count = smpi_process_count();
 
   smpi_bench_destroy();
+  smpi_shared_destroy();
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
       delete MPI_COMM_WORLD->group();
       MSG_barrier_destroy(process_data[0]->finalization_barrier());
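The new smpi_shared_destroy() call presumably releases the bookkeeping behind smpi_is_shared (the per-allocation private-block metadata); its body is not shown in this diff, so that reading is inferred from the name and from its placement beside smpi_bench_destroy().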
@@ -304,10 +363,10 @@ void smpi_global_destroy()
   }
   for (int i = 0; i < count; i++) {
     if(process_data[i]->comm_self()!=MPI_COMM_NULL){
-      Comm::destroy(process_data[i]->comm_self());
+      simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
     }
     if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
-      Comm::destroy(process_data[i]->comm_intra());
+      simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
     }
     xbt_os_timer_free(process_data[i]->timer());
     xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
@@ -318,9 +377,9 @@ void smpi_global_destroy()
 
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
     MPI_COMM_WORLD->cleanup_smp();
-    MPI_COMM_WORLD->cleanup_attr<Comm>();
-    if(Colls::smpi_coll_cleanup_callback!=nullptr)
-      Colls::smpi_coll_cleanup_callback();
+    MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
+    if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
+      simgrid::smpi::Colls::smpi_coll_cleanup_callback();
     delete MPI_COMM_WORLD;
   }
 
@@ -347,7 +406,7 @@ void __attribute__ ((weak)) user_main_()
 
 int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
 {
-  Process::init(&argc, &argv);
+  simgrid::smpi::Process::init(&argc, &argv);
   user_main_();
   return 0;
 }
@@ -390,14 +449,15 @@ static void smpi_init_logs(){
   XBT_LOG_CONNECT(smpi_request);
   XBT_LOG_CONNECT(smpi_replay);
   XBT_LOG_CONNECT(smpi_rma);
+  XBT_LOG_CONNECT(smpi_shared);
   XBT_LOG_CONNECT(smpi_utils);
 }
 }
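Connecting smpi_shared here assumes the shared-malloc implementation file declares the category with the usual XBT pattern, presumably something like the following (a guess, not copied from the actual file):

    XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_shared, smpi, "Logging specific to SMPI (shared memory)");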
 
 static void smpi_init_options(){
 
-    Colls::set_collectives();
-    Colls::smpi_coll_cleanup_callback=nullptr;
+    simgrid::smpi::Colls::set_collectives();
+    simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
     smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
     smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
     smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
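These values come from SimGrid's configuration layer; on the command line they are typically set with --cfg=<name>:<value>, for instance --cfg=smpi/privatize-global-variables:yes to enable the data-segment switching exercised by the copy callback above (syntax recalled from SimGrid's usual CLI, not shown in this diff).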