AND Algorithmique Numérique Distribuée

Public GIT Repository
remove unwanted s4u include in all smpi files
[simgrid.git] / src / smpi / include / private.hpp
index b3bec46..cd4ae34 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved.          */
+/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved.          */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -6,7 +6,6 @@
 #ifndef SMPI_PRIVATE_HPP
 #define SMPI_PRIVATE_HPP
 
-#include "simgrid/s4u/Barrier.hpp"
 #include "smpi/smpi.h"
 #include "smpi/smpi_helpers_internal.h"
 #include "src/instr/instr_smpi.hpp"
@@ -17,7 +16,7 @@ constexpr unsigned MPI_REQ_PERSISTENT     = 0x1;
 constexpr unsigned MPI_REQ_NON_PERSISTENT = 0x2;
 constexpr unsigned MPI_REQ_SEND           = 0x4;
 constexpr unsigned MPI_REQ_RECV           = 0x8;
-constexpr unsigned MPI_REQ_RECV_DELETE    = 0x10;
+constexpr unsigned MPI_REQ_PROBE          = 0x10;
 constexpr unsigned MPI_REQ_ISEND          = 0x20;
 constexpr unsigned MPI_REQ_SSEND          = 0x40;
 constexpr unsigned MPI_REQ_PREPARED       = 0x80;
@@ -27,6 +26,8 @@ constexpr unsigned MPI_REQ_ACCUMULATE     = 0x400;
 constexpr unsigned MPI_REQ_GENERALIZED    = 0x800;
 constexpr unsigned MPI_REQ_COMPLETE       = 0x1000;
 constexpr unsigned MPI_REQ_BSEND          = 0x2000;
+constexpr unsigned MPI_REQ_MATCHED        = 0x4000;
+constexpr unsigned MPI_REQ_CANCELLED      = 0x8000;
 
 enum class SmpiProcessState { UNINITIALIZED, INITIALIZING, INITIALIZED /*(=MPI_Init called)*/, FINALIZED };
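
The MPI_REQ_* constants above are independent bit flags, so a single request can carry several of them at once. A minimal usage sketch (the variable name flags_ is hypothetical, not code from this commit), assuming these constants are in scope:

    unsigned flags_ = MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE;
    if (flags_ & MPI_REQ_PROBE) {
      // the request was created by a probe and must not consume the matched message
    }
    flags_ |= MPI_REQ_MATCHED;  // record that a matching send was found
    flags_ &= ~MPI_REQ_PROBE;   // clear a flag once it no longer applies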
 
@@ -59,25 +60,23 @@ extern XBT_PUBLIC int mpi_statuses_ignore_;
 #define FORT_ADDR(addr, val, val2)                                         \
   (((void *)(addr) == (void*) &(val2))                  \
    ? (val) : (void *)(addr))
-#define FORT_BOTTOM(addr)          FORT_ADDR(addr, MPI_BOTTOM, mpi_bottom_)
-#define FORT_IN_PLACE(addr)        FORT_ADDR(addr, MPI_IN_PLACE, mpi_in_place_)
-#define FORT_STATUS_IGNORE(addr)   static_cast<MPI_Status*>(FORT_ADDR(addr, MPI_STATUS_IGNORE, mpi_status_ignore_))
-#define FORT_STATUSES_IGNORE(addr) static_cast<MPI_Status*>(FORT_ADDR(addr, MPI_STATUSES_IGNORE, mpi_statuses_ignore_))
+#define FORT_BOTTOM(addr) FORT_ADDR((addr), MPI_BOTTOM, mpi_bottom_)
+#define FORT_IN_PLACE(addr) FORT_ADDR((addr), MPI_IN_PLACE, mpi_in_place_)
+#define FORT_STATUS_IGNORE(addr) static_cast<MPI_Status*>(FORT_ADDR((addr), MPI_STATUS_IGNORE, mpi_status_ignore_))
+#define FORT_STATUSES_IGNORE(addr)                                                                                     \
+  static_cast<MPI_Status*>(FORT_ADDR((addr), MPI_STATUSES_IGNORE, mpi_statuses_ignore_))
 
 extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED;
 
-typedef SMPI_Cart_topology* MPIR_Cart_Topology;
-
-typedef SMPI_Graph_topology* MPIR_Graph_Topology;
-
-typedef SMPI_Dist_Graph_topology* MPIR_Dist_Graph_Topology;
+using MPIR_Cart_Topology       = SMPI_Cart_topology*;
+using MPIR_Graph_Topology      = SMPI_Graph_topology*;
+using MPIR_Dist_Graph_Topology = SMPI_Dist_Graph_topology*;
 
 XBT_PRIVATE simgrid::smpi::ActorExt* smpi_process();
 XBT_PRIVATE simgrid::smpi::ActorExt* smpi_process_remote(simgrid::s4u::ActorPtr actor);
 XBT_PRIVATE int smpi_get_universe_size();
 
-XBT_PRIVATE void smpi_deployment_register_process(const std::string& instance_id, int rank,
-                                                  simgrid::s4u::ActorPtr actor);
+XBT_PRIVATE void smpi_deployment_register_process(const std::string& instance_id, int rank, simgrid::s4u::Actor* actor);
 XBT_PRIVATE void smpi_deployment_unregister_process(const std::string& instance_id);
 
 XBT_PRIVATE MPI_Comm* smpi_deployment_comm_world(const std::string& instance_id);
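
FORT_ADDR compares a pointer received from Fortran with the address of the matching sentinel variable (mpi_bottom_, mpi_status_ignore_, ...) and substitutes the C-level constant when they are equal. A hedged illustration of how a wrapper could use it (example_wrapper_ is hypothetical, not an SMPI symbol; this header's declarations are assumed to be in scope):

    extern "C" void example_wrapper_(MPI_Status* status_f, int* ierr)
    {
      // If the Fortran caller passed mpi_status_ignore_, status becomes MPI_STATUS_IGNORE
      // and the C side knows it can skip filling in the status fields.
      MPI_Status* status = FORT_STATUS_IGNORE(status_f);
      (void)status;
      *ierr = MPI_SUCCESS;
    }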
@@ -93,15 +92,28 @@ XBT_PRIVATE int smpi_enabled();
 XBT_PRIVATE double smpi_mpi_wtime();
 XBT_PRIVATE void smpi_mpi_init();
 
+enum class SharedMallocType { NONE, LOCAL, GLOBAL };
+enum class SmpiPrivStrategies { NONE = 0, MMAP = 1, DLOPEN = 2, DEFAULT = DLOPEN };
+
+XBT_PRIVATE double smpi_cfg_host_speed();
+XBT_PRIVATE bool smpi_cfg_simulate_computation();
+XBT_PRIVATE SharedMallocType smpi_cfg_shared_malloc();
+XBT_PRIVATE double smpi_cfg_cpu_thresh();
+XBT_PRIVATE SmpiPrivStrategies smpi_cfg_privatization();
+XBT_PRIVATE int smpi_cfg_async_small_thresh();
+XBT_PRIVATE int smpi_cfg_detached_send_thresh();
+XBT_PRIVATE bool smpi_cfg_grow_injected_times();
+XBT_PRIVATE double smpi_cfg_iprobe_cpu_usage();
+XBT_PRIVATE bool smpi_cfg_trace_call_location();
+XBT_PRIVATE bool smpi_cfg_trace_call_use_absolute_path();
+XBT_PRIVATE std::string smpi_cfg_comp_adjustment_file();
+XBT_PRIVATE std::string smpi_cfg_papi_events_file();
+XBT_PRIVATE double smpi_cfg_auto_shared_malloc_thresh();
+
 // utilities
-extern XBT_PRIVATE double smpi_cpu_threshold;
-extern XBT_PRIVATE double smpi_host_speed;
 extern XBT_PRIVATE char* smpi_data_exe_start; // start of the data+bss segment of the executable
 extern XBT_PRIVATE int smpi_data_exe_size;    // size of the data+bss segment of the executable
 
-enum class SharedMallocType { NONE, LOCAL, GLOBAL };
-extern XBT_PRIVATE SharedMallocType smpi_cfg_shared_malloc; // Whether to activate shared malloc
-
 XBT_PRIVATE void smpi_switch_data_segment(simgrid::s4u::ActorPtr actor);
 
 XBT_PRIVATE void smpi_prepare_global_memory_segment();
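
The smpi_cfg_* accessors replace the mutable globals that used to be declared here (smpi_host_speed, smpi_cpu_threshold, smpi_cfg_shared_malloc, smpi_privatize_global_variables). A minimal sketch of the accessor pattern, assuming the value is read once through simgrid::config::get_value from xbt/config.hpp (the real definitions live in SMPI's configuration code and may differ):

    #include "xbt/config.hpp"

    double smpi_cfg_host_speed()
    {
      // Read the configuration item once, then serve it from a function instead of
      // exposing a writable global ("smpi/host-speed" is the assumed option name).
      static const double host_speed = simgrid::config::get_value<double>("smpi/host-speed");
      return host_speed;
    }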
@@ -111,6 +123,7 @@ XBT_PRIVATE void smpi_bench_destroy();
 XBT_PRIVATE void smpi_bench_begin();
 XBT_PRIVATE void smpi_bench_end();
 XBT_PRIVATE void smpi_shared_destroy();
+XBT_PRIVATE double smpi_adjust_comp_speed();
 
 XBT_PRIVATE unsigned char* smpi_get_tmp_sendbuffer(size_t size);
 XBT_PRIVATE unsigned char* smpi_get_tmp_recvbuffer(size_t size);
@@ -149,10 +162,11 @@ void mpi_startall_(int* count, int* requests, int* ierr);
 void mpi_wait_(int* request, MPI_Status* status, int* ierr);
 void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr);
 void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr);
-
+void mpi_free_mem_(void *baseptr, int* ierr);
 void mpi_barrier_(int* comm, int* ierr);
 void mpi_bcast_(void* buf, int* count, int* datatype, int* root, int* comm, int* ierr);
 void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr);
+void mpi_alloc_mem_(int* size, int* info, void *baseptr, int* ierr);
 void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr);
 void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr);
 void mpi_reduce_scatter_block_(void* sendbuf, void* recvbuf, int* recvcount, int* datatype, int* op, int* comm,
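
These mpi_*_ symbols are the Fortran entry points: every argument is passed by pointer and MPI handles arrive as plain integers. A hedged sketch of the general wrapper shape, using the standard MPI_Comm_f2c conversion (the actual SMPI wrappers are defined elsewhere and may differ):

    extern "C" void mpi_barrier_(int* comm, int* ierr)
    {
      // Convert the Fortran integer handle to a C handle, forward to the C entry
      // point, and hand the error code back through ierr as Fortran expects.
      *ierr = MPI_Barrier(MPI_Comm_f2c(*comm));
    }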
@@ -443,17 +457,39 @@ void mpi_comm_get_parent_(int* parent, int* ierr);
 void mpi_file_close_(int* file, int* ierr);
 void mpi_file_delete_(char* filename, int* info, int* ierr);
 void mpi_file_open_(int* comm, char* filename, int* amode, int* info, int* fh, int* ierr);
-void mpi_file_set_view_(int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr);
+void mpi_file_seek_(int* fh, MPI_Offset* offset, int* whence, int* ierr);
+void mpi_file_seek_shared_(int* fh, MPI_Offset* offset, int* whence, int* ierr);
+void mpi_file_get_position_(int* fh, MPI_Offset* offset, int* ierr);
+void mpi_file_get_position_shared_(int* fh, MPI_Offset* offset, int* ierr);
+void mpi_file_set_view_(int* fh, MPI_Offset* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr);
+void mpi_file_get_view_(int* fh, MPI_Offset* disp, int* etype, int* filetype, char *datarep, int* ierr);
 void mpi_file_read_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_shared_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_all_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_ordered_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_at_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_at_all_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
 void mpi_file_write_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_all_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_shared_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_ordered_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_at_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_at_all_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
 void smpi_init_fortran_types();
+void smpi_execute_flops_(double* flops);
+void smpi_execute_flops_benched_(double* flops);
+void smpi_execute_(double* duration);
+void smpi_execute_benched_(double* duration);
 } // extern "C"
 
+XBT_PRIVATE int smpi_temp_shm_get();
+XBT_PRIVATE void* smpi_temp_shm_mmap(int fd, size_t size);
+
 struct s_smpi_privatization_region_t {
   void* address;
   int file_descriptor;
 };
-typedef s_smpi_privatization_region_t* smpi_privatization_region_t;
+using smpi_privatization_region_t = s_smpi_privatization_region_t*;
 
 extern XBT_PRIVATE int smpi_loaded_page;
 XBT_PRIVATE smpi_privatization_region_t smpi_init_global_memory_segment_process();
@@ -467,22 +503,92 @@ XBT_PRIVATE smpi_privatization_region_t smpi_init_global_memory_segment_process(
 #define TOPAGE(addr) (void*)(((unsigned long)(addr) / xbt_pagesize) * xbt_pagesize)
 
 /** Used only if PAPI is compiled in, but integrated anyway so that this file does not depend on internal_config.h (to speed builds) */
-typedef std::vector<std::pair</* counter name */ std::string, /* counter value */ long long>> papi_counter_t;
+using papi_counter_t = std::vector<std::pair</* counter name */ std::string, /* counter value */ long long>>;
 struct papi_process_data {
   papi_counter_t counter_data;
   int event_set;
 };
-extern std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;
+extern std::map</* computation unit name */ std::string, papi_process_data, std::less<>> units2papi_setup;
 
 extern std::unordered_map<std::string, double> location2speedup;
 
 /** @brief Returns the last call location (filename, linenumber). Process-specific. */
 XBT_PUBLIC smpi_trace_call_location_t* smpi_trace_get_call_location();
 
-enum class SmpiPrivStrategies { NONE = 0, MMAP = 1, DLOPEN = 2, DEFAULT = DLOPEN };
-
-extern XBT_PRIVATE SmpiPrivStrategies smpi_privatize_global_variables;
-
 XBT_PRIVATE void private_execute_flops(double flops);
 
+#define CHECK_ARGS(test, errcode, ...)                                                                                 \
+  if (test) {                                                                                                          \
+    int error_code_ = (errcode);                                                                                       \
+    if (error_code_ != MPI_SUCCESS)                                                                                    \
+      XBT_WARN(__VA_ARGS__);                                                                                           \
+    return error_code_;                                                                                                \
+  }
+
+#define CHECK_MPI_NULL(num, val, err, ptr)                                                                             \
+  CHECK_ARGS((ptr) == (val), (err),                                                                                    \
+             "%s: param %d %s cannot be %s", __func__, (num), _XBT_STRINGIFY(ptr), _XBT_STRINGIFY(val));
+#define CHECK_NULL(num,err,buf)                                                                                        \
+  CHECK_ARGS((buf) == nullptr, (err),                                                                                  \
+             "%s: param %d %s cannot be NULL", __func__, (num), _XBT_STRINGIFY(buf));
+#define CHECK_NEGATIVE(num, err, val)                                                                                  \
+  CHECK_ARGS((val) < 0, (err),                                                                                         \
+             "%s: param %d %s cannot be negative", __func__, (num), _XBT_STRINGIFY(val));
+#define CHECK_NEGATIVE_OR_ZERO(num, err, val)                                                                          \
+  CHECK_ARGS((val) <= 0, (err),                                                                                        \
+             "%s: param %d %s cannot be negative or 0", __func__, (num), _XBT_STRINGIFY(val));
+#define CHECK_COMM2(num, comm)                                                                                         \
+  CHECK_MPI_NULL((num), MPI_COMM_NULL, MPI_ERR_COMM, (comm))
+#define CHECK_DELETED(num, err, obj)                                                                                   \
+  CHECK_ARGS((obj)->deleted(), (err), "%s: param %d %s has already been freed", __func__, (num), _XBT_STRINGIFY(obj));
+#define CHECK_COMM(num)                                                                                                \
+  CHECK_COMM2((num), comm)                                                                                             \
+  CHECK_DELETED((num), MPI_ERR_COMM, comm)
+#define CHECK_REQUEST(num)                                                                                             \
+  CHECK_ARGS(request == nullptr, MPI_ERR_REQUEST,                                                                      \
+             "%s: param %d request cannot be NULL",__func__, (num));
+#define CHECK_BUFFER(num,buf,count)                                                                                    \
+  CHECK_ARGS((buf) == nullptr && (count) > 0, MPI_ERR_BUFFER,                                                          \
+             "%s: param %d %s cannot be NULL if %s > 0",__func__, (num), _XBT_STRINGIFY(buf), _XBT_STRINGIFY(count));
+#define CHECK_COUNT(num, count)                                                                                        \
+  CHECK_NEGATIVE((num), MPI_ERR_COUNT, (count))
+#define CHECK_TYPE(num, datatype)                                                                                      \
+  CHECK_ARGS(((datatype) == MPI_DATATYPE_NULL|| not (datatype)->is_valid()), MPI_ERR_TYPE,                             \
+             "%s: param %d %s cannot be MPI_DATATYPE_NULL or invalid", __func__, (num), _XBT_STRINGIFY(datatype));
+#define CHECK_OP(num, op, type)                                                                                        \
+  CHECK_MPI_NULL((num), MPI_OP_NULL, MPI_ERR_OP, (op))                                                                 \
+  CHECK_ARGS(((op)->allowed_types() && (((op)->allowed_types() & (type)->flags()) == 0)), MPI_ERR_OP,                \
+             "%s: param %d op %s can't be applied to type %s", __func__, (num), _XBT_STRINGIFY(op), type->name());
+#define CHECK_ROOT(num)\
+  CHECK_ARGS((root < 0 || root >= comm->size()), MPI_ERR_ROOT,                                                         \
+             "%s: param %d root (=%d) cannot be negative or larger than communicator size (=%d)", __func__, (num),     \
+             root, comm->size());
+#define CHECK_INFO(num,info)                                                                                           \
+  CHECK_MPI_NULL((num), MPI_INFO_NULL, MPI_ERR_INFO, (info))
+#define CHECK_TAG(num,tag)                                                                                             \
+  CHECK_ARGS(((tag) < 0 && (tag) !=  MPI_ANY_TAG), MPI_ERR_TAG,                                                        \
+             "%s: param %d %s (=%d) cannot be negative", __func__, (num), _XBT_STRINGIFY(tag), (tag));
+#define CHECK_FILE(num, fh)                                                                                            \
+  CHECK_MPI_NULL((num), MPI_FILE_NULL, MPI_ERR_FILE, (fh))
+#define CHECK_OFFSET(num, offset)                                                                                      \
+  CHECK_NEGATIVE((num), MPI_ERR_DISP, (offset))
+#define CHECK_GROUP(num, group)                                                                                        \
+  CHECK_MPI_NULL((num), MPI_GROUP_NULL, MPI_ERR_GROUP, (group))
+#define CHECK_WIN(num, win)                                                                                            \
+  CHECK_MPI_NULL((num), MPI_WIN_NULL, MPI_ERR_WIN, (win))
+#define CHECK_RANK(num, rank, comm)                                                                                    \
+  CHECK_ARGS(((rank) >= (comm)->size() || (rank) <0), MPI_ERR_RANK,                                                    \
+             "%s: param %d %s (=%d) cannot be < 0 or > %d", __func__, (num), _XBT_STRINGIFY(rank),                     \
+             (rank), (comm)->size() );
+#define CHECK_PROC_RMA(num,proc,win)                                                                                   \
+  CHECK_MPI_NULL((num), MPI_PROC_NULL, MPI_SUCCESS, (proc))                                                            \
+  CHECK_RANK(num, proc, win->comm())
+#define CHECK_NOT_IN_PLACE_ROOT(num, buf)                                                                                   \
+  CHECK_ARGS((buf == MPI_IN_PLACE), MPI_ERR_BUFFER,                                                                    \
+             "%s: param %d %s cannot be MPI_IN_PLACE for rank %d with root %d", __func__, (num), _XBT_STRINGIFY(buf),  \
+             rank, root);
+#define CHECK_NOT_IN_PLACE(num, buf)                                                                                   \
+  CHECK_ARGS((buf == MPI_IN_PLACE), MPI_ERR_BUFFER,                                                                    \
+             "%s: param %d %s cannot be MPI_IN_PLACE for rank %d", __func__, (num), _XBT_STRINGIFY(buf),  \
+             rank);
 #endif
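
The CHECK_* macros above are argument-validation guards for the MPI entry points: when the test holds they log a warning through XBT_WARN and return the corresponding MPI error code from the enclosing function. A usage sketch (PMPI_Example_send is hypothetical; real call sites live in SMPI's PMPI implementation files and assume a log category is declared in the translation unit):

    int PMPI_Example_send(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
    {
      CHECK_COMM(6)               // comm must not be MPI_COMM_NULL or already freed
      CHECK_BUFFER(1, buf, count) // buf may only be NULL when count is 0
      CHECK_COUNT(2, count)       // count must not be negative
      CHECK_TYPE(3, datatype)     // datatype must not be MPI_DATATYPE_NULL or invalid
      CHECK_TAG(5, tag)           // tag must be non-negative or MPI_ANY_TAG
      CHECK_RANK(4, dst, comm)    // dst must be a valid rank in comm
      // ... actual send logic would follow here ...
      return MPI_SUCCESS;
    }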