AND (Algorithmique Numérique Distribuée)

Public GIT Repository
Merge pull request #283 from mpoquet/smpi_convert_fix
author Martin Quinson <624847+mquinson@users.noreply.github.com>
Sat, 7 Jul 2018 19:01:00 +0000 (21:01 +0200)
committer GitHub <noreply@github.com>
Sat, 7 Jul 2018 19:01:00 +0000 (21:01 +0200)
Smpi convert script fixes

Beyond the convert-script fixes named in the title, the combined diff below makes barriers a first-class public datatype: a new C API (include/simgrid/barrier.h), backed by s4u::Barrier, replaces the private MSG barrier implementation throughout SMPI. The MPI error codes become an X-macro list so that the newly implemented PMPI_Error_string() can derive its message table from the same source, and a WRAPPED_PMPI_CALL_NORETURN variant of the wrapper macro is introduced for PMPI calls whose return value is not an MPI error code.

24 files changed:
.codacy.yml [new file with mode: 0644]
doc/doxygen/inside_tests.doc
doc/doxygen/install.doc
include/simgrid/barrier.h [new file with mode: 0644]
include/simgrid/forward.h
include/simgrid/msg.h
include/smpi/smpi.h
src/msg/msg_legacy.cpp
src/msg/msg_synchro.cpp
src/s4u/s4u_Barrier.cpp
src/simgrid/sg_config.cpp
src/smpi/bindings/smpi_mpi.cpp
src/smpi/bindings/smpi_pmpi.cpp
src/smpi/include/private.hpp
src/smpi/include/smpi_process.hpp
src/smpi/include/smpi_win.hpp
src/smpi/internals/smpi_bench.cpp
src/smpi/internals/smpi_deployment.cpp
src/smpi/internals/smpi_global.cpp
src/smpi/internals/smpi_process.cpp
src/smpi/mpi/smpi_datatype.cpp
src/smpi/mpi/smpi_win.cpp
tools/cmake/DefinePackages.cmake
tools/jenkins/build.sh

diff --git a/.codacy.yml b/.codacy.yml
new file mode 100644 (file)
index 0000000..ab3273b
--- /dev/null
@@ -0,0 +1,6 @@
+---
+engines:
+ coverage:
+   enabled: true
+   exclude_paths:
+     - teshsuite/**
diff --git a/doc/doxygen/inside_tests.doc b/doc/doxygen/inside_tests.doc
index 7845eb2..8188d3b 100644 (file)
@@ -257,7 +257,7 @@ refer to the <a href="https://wiki.inria.fr/ciportal/">CI documentation</a>.
 The result can be seen here: https://ci.inria.fr/simgrid/
 
 We have 2 interesting projects on Jenkins:
-\li <a href="https://ci.inria.fr/simgrid/job/SimGrid-Multi/">SimGrid-Multi</a>
+\li <a href="https://ci.inria.fr/simgrid/job/SimGrid/">SimGrid</a>
     is the main project, running the tests that we spoke about.\n It is
     configured (on Jenkins) to run the script <tt>tools/jenkins/build.sh</tt>
 \li <a href="https://ci.inria.fr/simgrid/job/SimGrid-DynamicAnalysis/">SimGrid-DynamicAnalysis</a>
@@ -269,7 +269,7 @@ We have 2 interesting projects on Jenkins:
 
 In each case, SimGrid gets built in
 /builds/workspace/$PROJECT/build_mode/$CONFIG/label/$SERVER/build 
-with $PROJECT being for instance "SimGrid-Multi", $CONFIG "DEBUG" or
+with $PROJECT being for instance "SimGrid", $CONFIG "DEBUG" or
 "ModelChecker" and $SERVER for instance "simgrid-fedora20-64-clang".
 
 If some configurations are known to fail on some systems (such as
diff --git a/doc/doxygen/install.doc b/doc/doxygen/install.doc
index 88d880e..8b474c7 100644 (file)
@@ -37,7 +37,7 @@ For Windows, head to [AppVeyor](https://ci.appveyor.com/project/simgrid/simgrid)
 Click on the artefact link on the right, and grab your file. If the latest build failed, there will be no artefact. Then
 you will need to first click on "History" on the top and search for the last successful build.
 
-For non-Windows systems (Linux, Mac or FreeBSD), head to [Jenkins](https://ci.inria.fr/simgrid/job/SimGrid-Multi).
+For non-Windows systems (Linux, Mac or FreeBSD), head to [Jenkins](https://ci.inria.fr/simgrid/job/SimGrid).
 In the build history, pick the last green (or at least yellow) build that is not blinking (i.e., not currently under 
 build). In the list, pick a system that is close to yours, and click on the ball in the Debug row. The build artefact 
 will appear on the top of the resulting page.
diff --git a/include/simgrid/barrier.h b/include/simgrid/barrier.h
new file mode 100644 (file)
index 0000000..72f990e
--- /dev/null
@@ -0,0 +1,22 @@
+/* Public interface to the barrier datatype                                 */
+
+/* Copyright (c) 2018. The SimGrid Team. All rights reserved.          */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef INCLUDE_SIMGRID_BARRIER_H_
+#define INCLUDE_SIMGRID_BARRIER_H_
+
+#include <simgrid/forward.h>
+
+/* C interface */
+SG_BEGIN_DECL()
+
+XBT_PUBLIC sg_bar_t sg_barrier_init(unsigned int count);
+XBT_PUBLIC void sg_barrier_destroy(sg_bar_t bar);
+XBT_PUBLIC int sg_barrier_wait(sg_bar_t bar);
+
+SG_END_DECL()
+
+#endif /* INCLUDE_SIMGRID_BARRIER_H_ */
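
That header is the entire public surface: create a barrier with a participant count, wait on it, destroy it. A minimal usage sketch (hypothetical worker/setup functions; engine startup and actor creation are elided, as they depend on the rest of the application):

    #include <simgrid/barrier.h>

    static sg_bar_t bar; /* shared by all participating actors */

    static void worker()
    {
      /* ... per-actor work ... */
      sg_barrier_wait(bar); /* blocks until `count` actors have arrived */
    }

    static void setup()
    {
      bar = sg_barrier_init(4); /* 4 actors must reach the barrier to unblock it */
      /* ... spawn 4 actors running worker() (elided) ... */
    }

    static void teardown()
    {
      sg_barrier_destroy(bar); /* release the barrier once the simulation is done */
    }
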
diff --git a/include/simgrid/forward.h b/include/simgrid/forward.h
index 4ce7e57..601cb74 100644 (file)
@@ -20,6 +20,7 @@ class Actor;
 using ActorPtr = boost::intrusive_ptr<Actor>;
 XBT_PUBLIC void intrusive_ptr_release(Actor* actor);
 XBT_PUBLIC void intrusive_ptr_add_ref(Actor* actor);
+class Barrier;
 class Comm;
 using CommPtr = boost::intrusive_ptr<Comm>;
 XBT_PUBLIC void intrusive_ptr_release(Comm* c);
@@ -128,6 +129,7 @@ class VirtualMachineImpl;
 } // namespace simgrid
 
 typedef simgrid::s4u::Actor s4u_Actor;
+typedef simgrid::s4u::Barrier s4u_Barrier;
 typedef simgrid::s4u::Host s4u_Host;
 typedef simgrid::s4u::Link s4u_Link;
 typedef simgrid::s4u::File s4u_File;
@@ -147,6 +149,7 @@ typedef simgrid::surf::StorageImpl* surf_storage_t;
 #else
 
 typedef struct s4u_Actor s4u_Actor;
+typedef struct s4u_Barrier s4u_Barrier;
 typedef struct s4u_Host s4u_Host;
 typedef struct s4u_Link s4u_Link;
 typedef struct s4u_File s4u_File;
@@ -164,6 +167,7 @@ typedef struct s_surf_storage* surf_storage_t;
 
 #endif
 
+typedef s4u_Barrier* sg_bar_t;
 typedef s4u_NetZone* sg_netzone_t;
 typedef s4u_Host* sg_host_t;
 typedef s4u_Link* sg_link_t;
diff --git a/include/simgrid/msg.h b/include/simgrid/msg.h
index b562b47..ea93d1b 100644 (file)
@@ -7,6 +7,7 @@
 #define SIMGRID_MSG_H
 
 #include <simgrid/actor.h>
+#include <simgrid/barrier.h>
 #include <simgrid/engine.h>
 #include <simgrid/forward.h>
 #include <simgrid/host.h>
@@ -354,13 +355,7 @@ XBT_PUBLIC int MSG_sem_get_capacity(msg_sem_t sem);
 XBT_PUBLIC void MSG_sem_destroy(msg_sem_t sem);
 XBT_PUBLIC int MSG_sem_would_block(msg_sem_t sem);
 
-/** @brief Opaque type representing a barrier identifier
- *  @ingroup msg_synchro
- *  @hideinitializer
- */
-
-#define MSG_BARRIER_SERIAL_PROCESS -1
-typedef struct s_msg_bar_t* msg_bar_t;
+typedef sg_bar_t msg_bar_t;
 XBT_PUBLIC msg_bar_t MSG_barrier_init(unsigned int count);
 XBT_PUBLIC void MSG_barrier_destroy(msg_bar_t bar);
 XBT_PUBLIC int MSG_barrier_wait(msg_bar_t bar);
diff --git a/include/smpi/smpi.h b/include/smpi/smpi.h
index 12e2ae6..51f0939 100644 (file)
@@ -47,80 +47,88 @@ SG_BEGIN_DECL()
 #define MPI_IN_PLACE (void *)-222
 
 // errorcodes
-#define MPI_SUCCESS                    0
-#define MPI_ERR_COMM                   1
-#define MPI_ERR_ARG                    2
-#define MPI_ERR_TYPE                   3
-#define MPI_ERR_REQUEST                4
-#define MPI_ERR_INTERN                 5
-#define MPI_ERR_COUNT                  6
-#define MPI_ERR_RANK                   7
-#define MPI_ERR_TAG                    8
-#define MPI_ERR_TRUNCATE               9
-#define MPI_ERR_GROUP                 10
-#define MPI_ERR_OP                    11
-#define MPI_ERR_OTHER                 12
-#define MPI_ERR_IN_STATUS             13
-#define MPI_ERR_PENDING               14
-#define MPI_ERR_BUFFER                15
-#define MPI_ERR_NAME                  16
-#define MPI_ERR_DIMS                  17
-#define MPI_ERR_TOPOLOGY              18
-#define MPI_ERR_NO_MEM                19
-#define MPI_ERR_WIN                   20
-#define MPI_ERR_INFO_VALUE            21
-#define MPI_ERR_INFO_KEY              22
-#define MPI_ERR_INFO_NOKEY            23
-#define MPI_ERR_ROOT                  24
-#define MPI_ERR_UNKNOWN               25
-#define MPI_ERR_KEYVAL                26
-#define MPI_ERR_BASE                  27
-#define MPI_ERR_SPAWN                 28
-#define MPI_ERR_PORT                  29
-#define MPI_ERR_SERVICE               30
-#define MPI_ERR_SIZE                  31
-#define MPI_ERR_DISP                  32
-#define MPI_ERR_INFO                  33
-#define MPI_ERR_LOCKTYPE              34
-#define MPI_ERR_ASSERT                35
-#define MPI_RMA_CONFLICT              36
-#define MPI_RMA_SYNC                  37
-#define MPI_ERR_FILE                  38
-#define MPI_ERR_NOT_SAME              39
-#define MPI_ERR_AMODE                 40
-#define MPI_ERR_UNSUPPORTED_DATAREP   41
-#define MPI_ERR_UNSUPPORTED_OPERATION 42
-#define MPI_ERR_NO_SUCH_FILE          43
-#define MPI_ERR_FILE_EXISTS           44
-#define MPI_ERR_BAD_FILE              45
-#define MPI_ERR_ACCESS                46
-#define MPI_ERR_NO_SPACE              47
-#define MPI_ERR_QUOTA                 48
-#define MPI_ERR_READ_ONLY             49
-#define MPI_ERR_FILE_IN_USE           50
-#define MPI_ERR_DUP_DATAREP           51
-#define MPI_ERR_CONVERSION            52
-#define MPI_ERR_IO                    53
-#define MPI_ERR_RMA_ATTACH            54
-#define MPI_ERR_RMA_CONFLICT          55
-#define MPI_ERR_RMA_RANGE             56
-#define MPI_ERR_RMA_SHARED            57
-#define MPI_ERR_RMA_SYNC              58
-#define MPI_ERR_RMA_FLAVOR            59
-#define MPI_T_ERR_CANNOT_INIT         60
-#define MPI_T_ERR_NOT_INITIALIZED     61
-#define MPI_T_ERR_MEMORY              62
-#define MPI_T_ERR_INVALID_INDEX       63
-#define MPI_T_ERR_INVALID_ITEM        64
-#define MPI_T_ERR_INVALID_SESSION     65
-#define MPI_T_ERR_INVALID_HANDLE      66
-#define MPI_T_ERR_OUT_OF_HANDLES      67
-#define MPI_T_ERR_OUT_OF_SESSIONS     68
-#define MPI_T_ERR_CVAR_SET_NOT_NOW    69
-#define MPI_T_ERR_CVAR_SET_NEVER      70
-#define MPI_T_ERR_PVAR_NO_WRITE       71
-#define MPI_T_ERR_PVAR_NO_STARTSTOP   72
-#define MPI_T_ERR_PVAR_NO_ATOMIC      73
+#define FOREACH_ERROR(ERROR)                    \
+          ERROR(MPI_SUCCESS)                    \
+          ERROR(MPI_ERR_COMM)                   \
+          ERROR(MPI_ERR_ARG)                    \
+          ERROR(MPI_ERR_TYPE)                   \
+          ERROR(MPI_ERR_REQUEST)                \
+          ERROR(MPI_ERR_INTERN)                 \
+          ERROR(MPI_ERR_COUNT)                  \
+          ERROR(MPI_ERR_RANK)                   \
+          ERROR(MPI_ERR_TAG)                    \
+          ERROR(MPI_ERR_TRUNCATE)               \
+          ERROR(MPI_ERR_GROUP)                  \
+          ERROR(MPI_ERR_OP)                     \
+          ERROR(MPI_ERR_OTHER)                  \
+          ERROR(MPI_ERR_IN_STATUS)              \
+          ERROR(MPI_ERR_PENDING)                \
+          ERROR(MPI_ERR_BUFFER)                 \
+          ERROR(MPI_ERR_NAME)                   \
+          ERROR(MPI_ERR_DIMS)                   \
+          ERROR(MPI_ERR_TOPOLOGY)               \
+          ERROR(MPI_ERR_NO_MEM)                 \
+          ERROR(MPI_ERR_WIN)                    \
+          ERROR(MPI_ERR_INFO_VALUE)             \
+          ERROR(MPI_ERR_INFO_KEY)               \
+          ERROR(MPI_ERR_INFO_NOKEY)             \
+          ERROR(MPI_ERR_ROOT)                   \
+          ERROR(MPI_ERR_UNKNOWN)                \
+          ERROR(MPI_ERR_KEYVAL)                 \
+          ERROR(MPI_ERR_BASE)                   \
+          ERROR(MPI_ERR_SPAWN)                  \
+          ERROR(MPI_ERR_PORT)                   \
+          ERROR(MPI_ERR_SERVICE)                \
+          ERROR(MPI_ERR_SIZE)                   \
+          ERROR(MPI_ERR_DISP)                   \
+          ERROR(MPI_ERR_INFO)                   \
+          ERROR(MPI_ERR_LOCKTYPE)               \
+          ERROR(MPI_ERR_ASSERT)                 \
+          ERROR(MPI_RMA_CONFLICT)               \
+          ERROR(MPI_RMA_SYNC)                   \
+          ERROR(MPI_ERR_FILE)                   \
+          ERROR(MPI_ERR_NOT_SAME)               \
+          ERROR(MPI_ERR_AMODE)                  \
+          ERROR(MPI_ERR_UNSUPPORTED_DATAREP)    \
+          ERROR(MPI_ERR_UNSUPPORTED_OPERATION)  \
+          ERROR(MPI_ERR_NO_SUCH_FILE)           \
+          ERROR(MPI_ERR_FILE_EXISTS)            \
+          ERROR(MPI_ERR_BAD_FILE)               \
+          ERROR(MPI_ERR_ACCESS)                 \
+          ERROR(MPI_ERR_NO_SPACE)               \
+          ERROR(MPI_ERR_QUOTA)                  \
+          ERROR(MPI_ERR_READ_ONLY)              \
+          ERROR(MPI_ERR_FILE_IN_USE)            \
+          ERROR(MPI_ERR_DUP_DATAREP)            \
+          ERROR(MPI_ERR_CONVERSION)             \
+          ERROR(MPI_ERR_IO)                     \
+          ERROR(MPI_ERR_RMA_ATTACH)             \
+          ERROR(MPI_ERR_RMA_CONFLICT)           \
+          ERROR(MPI_ERR_RMA_RANGE)              \
+          ERROR(MPI_ERR_RMA_SHARED)             \
+          ERROR(MPI_ERR_RMA_SYNC)               \
+          ERROR(MPI_ERR_RMA_FLAVOR)             \
+          ERROR(MPI_T_ERR_CANNOT_INIT)          \
+          ERROR(MPI_T_ERR_NOT_INITIALIZED)      \
+          ERROR(MPI_T_ERR_MEMORY)               \
+          ERROR(MPI_T_ERR_INVALID_INDEX)        \
+          ERROR(MPI_T_ERR_INVALID_ITEM)         \
+          ERROR(MPI_T_ERR_INVALID_SESSION)      \
+          ERROR(MPI_T_ERR_INVALID_HANDLE)       \
+          ERROR(MPI_T_ERR_OUT_OF_HANDLES)       \
+          ERROR(MPI_T_ERR_OUT_OF_SESSIONS)      \
+          ERROR(MPI_T_ERR_CVAR_SET_NOT_NOW)     \
+          ERROR(MPI_T_ERR_CVAR_SET_NEVER)       \
+          ERROR(MPI_T_ERR_PVAR_NO_WRITE)        \
+          ERROR(MPI_T_ERR_PVAR_NO_STARTSTOP)    \
+          ERROR(MPI_T_ERR_PVAR_NO_ATOMIC)
+
+#define GENERATE_ENUM(ENUM) ENUM,
+#define GENERATE_STRING(STRING) #STRING,
+
+enum ERROR_ENUM {
+    FOREACH_ERROR(GENERATE_ENUM)
+};
 
 #define MPI_ERRCODES_IGNORE (int *)0
 #define MPI_IDENT     0
@@ -224,7 +232,7 @@ typedef SMPI_Info* MPI_Info;
 #define MPI_STATUS_IGNORE ((MPI_Status*)NULL)
 #define MPI_STATUSES_IGNORE ((MPI_Status*)NULL)
 
-#define MPI_DATATYPE_NULL ((const MPI_Datatype)NULL)
+XBT_PUBLIC_DATA const MPI_Datatype MPI_DATATYPE_NULL;
 XBT_PUBLIC_DATA const MPI_Datatype MPI_CHAR;
 XBT_PUBLIC_DATA const MPI_Datatype MPI_SHORT;
 XBT_PUBLIC_DATA const MPI_Datatype MPI_INT;
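
The error codes keep their old values: expanding FOREACH_ERROR with GENERATE_ENUM yields an enum starting at MPI_SUCCESS == 0, in the same order as the removed #defines. The gain is that the very same list can be expanded with GENERATE_STRING to build a name table that cannot drift out of sync with the codes, which is what the new PMPI_Error_string() does further down. A tiny self-contained illustration of the X-macro pattern (hypothetical names):

    #include <cstdio>

    #define FOREACH_FRUIT(X) X(APPLE) X(PEAR) X(PLUM)
    #define GEN_ENUM(name) name,
    #define GEN_STR(name) #name,

    enum Fruit { FOREACH_FRUIT(GEN_ENUM) };                       // APPLE = 0, PEAR = 1, PLUM = 2
    static const char* fruit_names[] = {FOREACH_FRUIT(GEN_STR)};  // {"APPLE", "PEAR", "PLUM"}

    int main()
    {
      std::printf("%s\n", fruit_names[PEAR]); // prints "PEAR"
    }
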
diff --git a/src/msg/msg_legacy.cpp b/src/msg/msg_legacy.cpp
index bb1148f..fc75415 100644 (file)
@@ -358,3 +358,18 @@ void MSG_vm_destroy(sg_vm_t vm)
 {
   sg_vm_destroy(vm);
 }
+/********* barriers ************/
+sg_bar_t MSG_barrier_init(unsigned int count)
+{
+  return sg_barrier_init(count);
+}
+
+void MSG_barrier_destroy(sg_bar_t bar)
+{
+  sg_barrier_destroy(bar);
+}
+
+int MSG_barrier_wait(sg_bar_t bar)
+{
+  return sg_barrier_wait(bar);
+}
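
Existing MSG code keeps compiling unchanged, since msg_bar_t is now a mere alias for sg_bar_t (see the msg.h hunk above) and these shims forward directly. One exception: callers that compared MSG_barrier_wait()'s result against the removed MSG_BARRIER_SERIAL_PROCESS constant must be updated. A hypothetical MSG snippet that still works as-is:

    #include <simgrid/msg.h>

    msg_bar_t bar = MSG_barrier_init(2); /* now backed by s4u::Barrier */
    /* each of the two processes then calls: */
    MSG_barrier_wait(bar);
    /* and once everybody is done: */
    MSG_barrier_destroy(bar);
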
diff --git a/src/msg/msg_synchro.cpp b/src/msg/msg_synchro.cpp
index bce88e5..99290b1 100644 (file)
@@ -54,45 +54,4 @@ int MSG_sem_would_block(msg_sem_t sem) {
   return simgrid::simix::simcall([sem] { return SIMIX_sem_would_block(sem); });
 }
 
-/*-**** barrier related functions ****-*/
-struct s_msg_bar_t {
-  xbt_mutex_t mutex;
-  xbt_cond_t cond;
-  unsigned int arrived_processes;
-  unsigned int expected_processes;
-};
-
-/** @brief Initializes a barrier, with count elements */
-msg_bar_t MSG_barrier_init(unsigned int count) {
-  msg_bar_t bar           = new s_msg_bar_t;
-  bar->expected_processes = count;
-  bar->arrived_processes  = 0;
-  bar->mutex              = xbt_mutex_init();
-  bar->cond               = xbt_cond_init();
-  return bar;
-}
-
-/** @brief Initializes a barrier, with count elements */
-void MSG_barrier_destroy(msg_bar_t bar) {
-  xbt_mutex_destroy(bar->mutex);
-  xbt_cond_destroy(bar->cond);
-  delete bar;
-}
-
-/** @brief Performs a barrier already initialized */
-int MSG_barrier_wait(msg_bar_t bar) {
-  xbt_mutex_acquire(bar->mutex);
-  bar->arrived_processes++;
-  XBT_DEBUG("waiting %p %u/%u", bar, bar->arrived_processes, bar->expected_processes);
-  if (bar->arrived_processes == bar->expected_processes) {
-    xbt_cond_broadcast(bar->cond);
-    xbt_mutex_release(bar->mutex);
-    bar->arrived_processes = 0;
-    return MSG_BARRIER_SERIAL_PROCESS;
-  }
-
-  xbt_cond_wait(bar->cond, bar->mutex);
-  xbt_mutex_release(bar->mutex);
-  return 0;
-}
 /**@}*/
diff --git a/src/s4u/s4u_Barrier.cpp b/src/s4u/s4u_Barrier.cpp
index 482a164..a8ce588 100644 (file)
@@ -9,6 +9,7 @@
 #include <xbt/ex.hpp>
 #include <xbt/log.hpp>
 
+#include "simgrid/barrier.h"
 #include "simgrid/s4u/Barrier.hpp"
 #include "simgrid/simix.h"
 
@@ -17,10 +18,8 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_barrier, "S4U barrier");
 namespace simgrid {
 namespace s4u {
 
-Barrier::Barrier(unsigned int count) : expected_processes_(count)
+Barrier::Barrier(unsigned int expected_processes) : mutex_(Mutex::create()), cond_(ConditionVariable::create()), expected_processes_(expected_processes)
 {
-  mutex_ = Mutex::create();
-  cond_  = ConditionVariable::create();
 }
 
 /**
@@ -44,3 +43,22 @@ int Barrier::wait()
 }
 } // namespace s4u
 } // namespace simgrid
+
+/* **************************** Public C interface *************************** */
+
+sg_bar_t sg_barrier_init(unsigned int count)
+{
+  return new simgrid::s4u::Barrier(count);
+}
+
+/** @brief Destroys the given barrier */
+void sg_barrier_destroy(sg_bar_t bar)
+{
+  delete bar;
+}
+
+/** @brief Blocks on an initialized barrier until all expected participants have arrived */
+int sg_barrier_wait(sg_bar_t bar)
+{
+  return bar->wait();
+}
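
On the C++ side the class can of course be used directly, which is exactly what smpi_deployment.cpp and smpi_win.cpp do below with new simgrid::s4u::Barrier(n). A minimal sketch (assumes a running simulation; actor creation and cleanup elided):

    #include "simgrid/s4u/Barrier.hpp"

    static simgrid::s4u::Barrier* barrier = nullptr;

    static void participant()
    {
      /* ... computation ... */
      barrier->wait(); // returns once both participants have arrived
    }

    static void setup()
    {
      barrier = new simgrid::s4u::Barrier(2); // both participants must arrive
      /* ... spawn the two participants (elided), then `delete barrier` at teardown ... */
    }
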
diff --git a/src/simgrid/sg_config.cpp b/src/simgrid/sg_config.cpp
index c026309..415a9c7 100644 (file)
@@ -446,6 +446,9 @@ void sg_config_init(int *argc, char **argv)
   simgrid::config::declare_flag<std::string>("smpi/alltoallv", "Which collective to use for alltoallv", "");
   simgrid::config::declare_flag<std::string>("smpi/bcast", "Which collective to use for bcast", "");
   simgrid::config::declare_flag<std::string>("smpi/reduce", "Which collective to use for reduce", "");
+
+  simgrid::config::declare_flag<int>("smpi/plugin/lb/migration-frequency", "After how many migrations should the load balancer "
+                                                        "compute a new mapping?", 10);
 #endif // HAVE_SMPI
 
   /* Others */
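
Nothing in this hunk reads the flag back; presumably the load-balancing plugin does. Mirroring how smpi_init_options() fetches other options later in this diff, a consumer would look like this (hypothetical, not part of the commit):

    int migration_freq = simgrid::config::get_value<int>("smpi/plugin/lb/migration-frequency");
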
diff --git a/src/smpi/bindings/smpi_mpi.cpp b/src/smpi/bindings/smpi_mpi.cpp
index 963409c..93aead0 100644 (file)
@@ -27,6 +27,21 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi, "Logging specific to SMPI ,(mpi)
   }
 
 #define WRAPPED_PMPI_CALL(type, name, args, args2)                                                                     \
+  type name args                                                                                                       \
+  {                                                                                                                    \
+    XBT_VERB("SMPI - Entering %s", __func__);                                                                          \
+    type ret = P##name args2;                                                                                         \
+    if(ret!=MPI_SUCCESS) {                                                                                             \
+      char error_string[MPI_MAX_ERROR_STRING];                                                                         \
+      int error_size;                                                                                                  \
+      PMPI_Error_string(ret, error_string, &error_size);                                                               \
+      XBT_DEBUG("%s - returned %.*s instead of MPI_SUCCESS", __func__, error_size,error_string);                       \
+    }                                                                                                                  \
+    XBT_VERB("SMPI - Leaving %s", __func__);                                                                           \
+    return ret;                                                                                                        \
+  }
+
+#define WRAPPED_PMPI_CALL_NORETURN(type, name, args, args2)                                                            \
   type name args                                                                                                       \
   {                                                                                                                    \
     XBT_VERB("SMPI - Entering %s", __func__);                                                                          \
@@ -54,8 +69,8 @@ return P##name args2 ; \
 
 /* MPI User level calls */
 
-WRAPPED_PMPI_CALL(double, MPI_Wtick,(void),())
-WRAPPED_PMPI_CALL(double, MPI_Wtime,(void),())
+WRAPPED_PMPI_CALL_NORETURN(double, MPI_Wtick,(void),())
+WRAPPED_PMPI_CALL_NORETURN(double, MPI_Wtime,(void),())
 WRAPPED_PMPI_CALL(int,MPI_Abort,(MPI_Comm comm, int errorcode),(comm, errorcode))
 WRAPPED_PMPI_CALL(int,MPI_Accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win),( origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype,op, win))
 WRAPPED_PMPI_CALL(int,MPI_Address,(void *location, MPI_Aint * address),(location, address))
@@ -98,6 +113,7 @@ WRAPPED_PMPI_CALL(int,MPI_Compare_and_swap,(void *origin_addr, void *compare_add
         void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win), (origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp, win))
 WRAPPED_PMPI_CALL(int,MPI_Dims_create,(int nnodes, int ndims, int* dims) ,(nnodes, ndims, dims))
 WRAPPED_PMPI_CALL(int,MPI_Error_class,(int errorcode, int* errorclass) ,(errorcode, errorclass))
+WRAPPED_PMPI_CALL_NORETURN(int,MPI_Error_string,(int errorcode, char* string, int* resultlen) ,(errorcode, string, resultlen))
 WRAPPED_PMPI_CALL(int,MPI_Exscan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
 WRAPPED_PMPI_CALL(int,MPI_Finalized,(int * flag),(flag))
 WRAPPED_PMPI_CALL(int,MPI_Finalize,(void),())
@@ -245,20 +261,20 @@ WRAPPED_PMPI_CALL(int,MPI_Win_create_keyval,(MPI_Win_copy_attr_function* copy_fn
                               MPI_Win_delete_attr_function* delete_fn, int* keyval, void* extra_state), (copy_fn, delete_fn, keyval, extra_state))
 WRAPPED_PMPI_CALL(int,MPI_Win_free_keyval,(int* keyval), (keyval))
 WRAPPED_PMPI_CALL(int,MPI_Win_shared_query,(MPI_Win win, int rank, MPI_Aint* size, int* disp_unit, void* baseptr),(win, rank, size, disp_unit, baseptr))
-WRAPPED_PMPI_CALL(MPI_Comm, MPI_Comm_f2c,(MPI_Fint comm),(comm))
-WRAPPED_PMPI_CALL(MPI_Datatype, MPI_Type_f2c,(MPI_Fint datatype),(datatype))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Comm_c2f,(MPI_Comm comm),(comm))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Group_c2f,(MPI_Group group),(group))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Info_c2f,(MPI_Info info),(info))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Op_c2f,(MPI_Op op),(op))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Request_c2f,(MPI_Request request) ,(request))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Type_c2f,(MPI_Datatype datatype),( datatype))
-WRAPPED_PMPI_CALL(MPI_Fint, MPI_Win_c2f,(MPI_Win win),(win))
-WRAPPED_PMPI_CALL(MPI_Group, MPI_Group_f2c,(MPI_Fint group),( group))
-WRAPPED_PMPI_CALL(MPI_Info, MPI_Info_f2c,(MPI_Fint info),(info))
-WRAPPED_PMPI_CALL(MPI_Op, MPI_Op_f2c,(MPI_Fint op),(op))
-WRAPPED_PMPI_CALL(MPI_Request, MPI_Request_f2c,(MPI_Fint request),(request))
-WRAPPED_PMPI_CALL(MPI_Win, MPI_Win_f2c,(MPI_Fint win),(win))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Comm, MPI_Comm_f2c,(MPI_Fint comm),(comm))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Datatype, MPI_Type_f2c,(MPI_Fint datatype),(datatype))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Comm_c2f,(MPI_Comm comm),(comm))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Group_c2f,(MPI_Group group),(group))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Info_c2f,(MPI_Info info),(info))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Op_c2f,(MPI_Op op),(op))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Request_c2f,(MPI_Request request) ,(request))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Type_c2f,(MPI_Datatype datatype),( datatype))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Fint, MPI_Win_c2f,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Group, MPI_Group_f2c,(MPI_Fint group),( group))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Info, MPI_Info_f2c,(MPI_Fint info),(info))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Op, MPI_Op_f2c,(MPI_Fint op),(op))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Request, MPI_Request_f2c,(MPI_Fint request),(request))
+WRAPPED_PMPI_CALL_NORETURN(MPI_Win, MPI_Win_f2c,(MPI_Fint win),(win))
 WRAPPED_PMPI_CALL(int,MPI_Cancel,(MPI_Request* request) ,(request))
 WRAPPED_PMPI_CALL(int, MPI_Test_cancelled,(MPI_Status* status, int* flag) ,(status, flag))
 /*
@@ -299,10 +315,9 @@ UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Errhandler_create,(MPI_Handler_fu
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Errhandler_free,(MPI_Errhandler* errhandler) ,(errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Errhandler_get,(MPI_Comm comm, MPI_Errhandler* errhandler) ,(comm, errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Errhandler_set,(MPI_Comm comm, MPI_Errhandler errhandler) ,(comm, errhandler))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Error_string,(int errorcode, char* string, int* resultlen) ,(errorcode, string, resultlen))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Register_datarep, (char *datarep, MPI_Datarep_conversion_function *read_conversion_fn, MPI_Datarep_conversion_function *write_conversion_fn, MPI_Datarep_extent_function *dtype_file_extent_fn, void *extra_state) ,(datarep, read_conversion_fn, write_conversion_fn, dtype_file_extent_fn, extra_state))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_Fint, MPI_File_c2f,(MPI_File file), (file))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_File, MPI_File_f2c,(MPI_Fint file), (file))
+//UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_File, MPI_File_f2c,(MPI_Fint file), (file))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_call_errhandler,(MPI_File fh, int errorcode), (fh, errorcode))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_create_errhandler,(MPI_File_errhandler_function *function, MPI_Errhandler *errhandler),(function, errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int, MPI_File_set_errhandler,( MPI_File file, MPI_Errhandler errhandler), (file, errhandler))
@@ -396,5 +411,5 @@ UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Unpublish_name,( char *service_name, MPI
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Win_set_errhandler,(MPI_Win win, MPI_Errhandler errhandler) ,(win, errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Win_test,(MPI_Win win, int *flag),(win, flag))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int,MPI_Win_sync,(MPI_Win win),(win))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(MPI_Errhandler, MPI_Errhandler_f2c,(MPI_Fint errhandler),(errhandler))
+//UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(MPI_Errhandler, MPI_Errhandler_f2c,(MPI_Fint errhandler),(errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(MPI_Fint, MPI_Errhandler_c2f,(MPI_Errhandler errhandler),(errhandler))
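
For reference, WRAPPED_PMPI_CALL(int, MPI_Finalize, (void), ()) now expands to roughly the following; the NORETURN variant is identical minus the MPI_SUCCESS check, which is why it is used for MPI_Wtime()/MPI_Wtick() and the c2f/f2c conversions, whose return values are not error codes (hand-reconstructed expansion, whitespace adjusted):

    int MPI_Finalize(void)
    {
      XBT_VERB("SMPI - Entering %s", __func__);
      int ret = PMPI_Finalize();
      if (ret != MPI_SUCCESS) {
        char error_string[MPI_MAX_ERROR_STRING];
        int error_size;
        PMPI_Error_string(ret, error_string, &error_size);
        XBT_DEBUG("%s - returned %.*s instead of MPI_SUCCESS", __func__, error_size, error_string);
      }
      XBT_VERB("SMPI - Leaving %s", __func__);
      return ret;
    }
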
diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp
index b6dfc7e..b0f7cc3 100644 (file)
@@ -208,6 +208,15 @@ int PMPI_Error_class(int errorcode, int* errorclass) {
   return MPI_SUCCESS;
 }
 
+int PMPI_Error_string(int errorcode, char* string, int* resultlen){
+  static const char *smpi_error_string[] = {
+    FOREACH_ERROR(GENERATE_STRING)
+  };
+  *resultlen = strlen(smpi_error_string[errorcode]);
+  strncpy(string, smpi_error_string[errorcode], *resultlen);
+  return MPI_SUCCESS;  
+}
+
 int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
   smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
   smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
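
One caveat in PMPI_Error_string() above: strncpy() with a count equal to strlen() copies no terminating NUL, so the result is only safe to use together with the returned length (as the %.*s format in the wrapper macro does). A defensive variant would terminate explicitly (sketch, not part of this commit):

    *resultlen = strlen(smpi_error_string[errorcode]);
    strncpy(string, smpi_error_string[errorcode], *resultlen);
    string[*resultlen] = '\0'; // callers may reasonably expect a NUL-terminated string
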
diff --git a/src/smpi/include/private.hpp b/src/smpi/include/private.hpp
index bac1653..502cee5 100644 (file)
@@ -6,7 +6,7 @@
 #ifndef SMPI_PRIVATE_HPP
 #define SMPI_PRIVATE_HPP
 
-#include "simgrid/msg.h" // msg_bar_t
+#include "simgrid/s4u/Barrier.hpp"
 #include "smpi/smpi.h"
 #include "smpi/smpi_helpers_internal.h"
 #include "src/instr/instr_smpi.hpp"
@@ -66,7 +66,7 @@ XBT_PRIVATE int smpi_process_count();
 XBT_PRIVATE void smpi_deployment_register_process(const std::string instance_id, int rank,
                                                   simgrid::s4u::ActorPtr actor);
 XBT_PRIVATE MPI_Comm* smpi_deployment_comm_world(const std::string instance_id);
-XBT_PRIVATE msg_bar_t smpi_deployment_finalization_barrier(const std::string instance_id);
+XBT_PRIVATE simgrid::s4u::Barrier* smpi_deployment_finalization_barrier(const std::string instance_id);
 XBT_PRIVATE void smpi_deployment_cleanup_instances();
 
 XBT_PRIVATE void smpi_comm_copy_buffer_callback(smx_activity_t comm, void* buff, size_t buff_size);
diff --git a/src/smpi/include/smpi_process.hpp b/src/smpi/include/smpi_process.hpp
index e7c6772..eaa3ed1 100644 (file)
@@ -32,7 +32,7 @@ class Process {
     int sampling_                   = 0; /* inside an SMPI_SAMPLE_ block? */
     std::string instance_id_;
     bool replaying_                 = false; /* is the process replaying a trace */
-    msg_bar_t finalization_barrier_;
+    simgrid::s4u::Barrier* finalization_barrier_;
     smpi_trace_call_location_t trace_call_loc_;
     simgrid::s4u::ActorPtr actor_ = nullptr;
     smpi_privatization_region_t privatized_region_;
@@ -43,7 +43,7 @@ class Process {
     papi_counter_t papi_counter_data_;
 #endif
   public:
-    explicit Process(simgrid::s4u::ActorPtr actor, msg_bar_t barrier);
+    explicit Process(simgrid::s4u::ActorPtr actor, simgrid::s4u::Barrier* barrier);
     ~Process();
     void set_data(int* argc, char*** argv);
     void finalize();
diff --git a/src/smpi/include/smpi_win.hpp b/src/smpi/include/smpi_win.hpp
index d29f96a..ec89c1d 100644 (file)
@@ -7,10 +7,10 @@
 #ifndef SMPI_WIN_HPP_INCLUDED
 #define SMPI_WIN_HPP_INCLUDED
 
+#include "simgrid/s4u/Barrier.hpp"
 #include "smpi_f2c.hpp"
 #include "smpi_keyvals.hpp"
 #include "xbt/synchro.h"
-#include <simgrid/msg.h>
 
 #include <vector>
 #include <list>
@@ -28,7 +28,7 @@ class Win : public F2C, public Keyval {
   MPI_Comm comm_;
   std::vector<MPI_Request> *requests_;
   xbt_mutex_t mut_;
-  msg_bar_t bar_;
+  simgrid::s4u::Barrier* bar_;
   MPI_Win* connected_wins_;
   char* name_;
   int opened_;
diff --git a/src/smpi/internals/smpi_bench.cpp b/src/smpi/internals/smpi_bench.cpp
index ac7f78c..f7101a3 100644 (file)
@@ -166,7 +166,7 @@ void smpi_bench_end()
 
     for (auto const& pair : counter_data) {
       simgrid::instr::VariableType* variable = static_cast<simgrid::instr::VariableType*>(container->type_->by_name(pair.first));
-      variable->set_event(surf_get_clock(), pair.second);
+      variable->set_event(SIMIX_get_clock(), pair.second);
     }
   }
 #endif
diff --git a/src/smpi/internals/smpi_deployment.cpp b/src/smpi/internals/smpi_deployment.cpp
index 6c1c75c..72b798a 100644 (file)
@@ -6,7 +6,6 @@
 
 #include "smpi_host.hpp"
 #include "private.hpp"
-#include "simgrid/msg.h" /* barrier */
 #include "simgrid/s4u/Engine.hpp"
 #include "smpi_comm.hpp"
 #include <map>
@@ -18,7 +17,7 @@ namespace app {
 class Instance {
 public:
   Instance(const std::string name, int max_no_processes, int process_count, MPI_Comm comm,
-           msg_bar_t finalization_barrier)
+           simgrid::s4u::Barrier* finalization_barrier)
       : name(name)
       , size(max_no_processes)
       , present_processes(0)
@@ -30,7 +29,7 @@ public:
   int size;
   int present_processes;
   MPI_Comm comm_world;
-  msg_bar_t finalization_barrier;
+  simgrid::s4u::Barrier* finalization_barrier;
 };
 }
 }
@@ -63,10 +62,13 @@ void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_
     }
   }
 
-  Instance instance(std::string(name), num_processes, process_count, MPI_COMM_NULL, MSG_barrier_init(num_processes));
+  Instance instance(std::string(name), num_processes, process_count, MPI_COMM_NULL,
+                    new simgrid::s4u::Barrier(num_processes));
   MPI_Group group     = new simgrid::smpi::Group(instance.size);
   instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
-  MPI_Attr_put(instance.comm_world, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
+//  FIXME: using MPI_Attr_put with MPI_UNIVERSE_SIZE is forbidden, so we make it a no-op (which triggers a warning, as MPI_ERR_ARG is returned).
+//  Directly calling Comm::attr_put breaks for now, as MPI_UNIVERSE_SIZE is < 0.
+//  instance.comm_world->attr_put<simgrid::smpi::Comm>(MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
 
   process_count+=num_processes;
 
@@ -90,7 +92,7 @@ MPI_Comm* smpi_deployment_comm_world(const std::string instance_id)
   return &instance.comm_world;
 }
 
-msg_bar_t smpi_deployment_finalization_barrier(const std::string instance_id)
+simgrid::s4u::Barrier* smpi_deployment_finalization_barrier(const std::string instance_id)
 {
   if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
     return nullptr;
@@ -102,7 +104,7 @@ msg_bar_t smpi_deployment_finalization_barrier(const std::string instance_id)
 void smpi_deployment_cleanup_instances(){
   for (auto const& item : smpi_instances) {
     Instance instance = item.second;
-    MSG_barrier_destroy(instance.finalization_barrier);
+    delete instance.finalization_barrier;
     simgrid::smpi::Comm::destroy(instance.comm_world);
   }
   smpi_instances.clear();
diff --git a/src/smpi/internals/smpi_global.cpp b/src/smpi/internals/smpi_global.cpp
index 3ce72e1..bb67c34 100644 (file)
@@ -366,7 +366,7 @@ static void smpi_init_options(){
   simgrid::smpi::Colls::smpi_coll_cleanup_callback = nullptr;
   smpi_cpu_threshold                               = simgrid::config::get_value<double>("smpi/cpu-threshold");
   smpi_host_speed                                  = simgrid::config::get_value<double>("smpi/host-speed");
-  xbt_assert(smpi_host_speed >= 0, "You're trying to set the host_speed to a negative value (%f)", smpi_host_speed);
+  xbt_assert(smpi_host_speed > 0.0, "You're trying to set the host_speed to a non-positive value (given: %f)", smpi_host_speed);
   std::string smpi_privatize_option = simgrid::config::get_value<std::string>("smpi/privatization");
   if (smpi_privatize_option == "no" || smpi_privatize_option == "0")
     smpi_privatize_global_variables = SmpiPrivStrategies::NONE;
diff --git a/src/smpi/internals/smpi_process.cpp b/src/smpi/internals/smpi_process.cpp
index 661968f..996fc71 100644 (file)
@@ -23,7 +23,7 @@ namespace smpi{
 using simgrid::s4u::Actor;
 using simgrid::s4u::ActorPtr;
 
-Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
+Process::Process(ActorPtr actor, simgrid::s4u::Barrier* finalization_barrier)
     : finalization_barrier_(finalization_barrier), actor_(actor)
 {
   mailbox_         = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
@@ -66,7 +66,7 @@ void Process::set_data(int* argc, char*** argv)
 {
   instance_id_      = std::string((*argv)[1]);
   comm_world_       = smpi_deployment_comm_world(instance_id_);
-  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_);
+  simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
   if (barrier != nullptr) // don't overwrite the current one if the instance has none
     finalization_barrier_ = barrier;
 
@@ -96,7 +96,7 @@ void Process::finalize()
   if(MC_is_active() || MC_record_replay_is_active())
     return;
   // wait for all pending asynchronous comms to finish
-  MSG_barrier_wait(finalization_barrier_);
+  finalization_barrier_->wait();
 }
 
 /** @brief Check if a process is finalized */
diff --git a/src/smpi/mpi/smpi_datatype.cpp b/src/smpi/mpi/smpi_datatype.cpp
index 5e752c1..3c04c85 100644 (file)
@@ -76,6 +76,7 @@ CREATE_MPI_DATATYPE(MPI_REAL, 38, float);
 CREATE_MPI_DATATYPE(MPI_REAL4, 39, float);
 CREATE_MPI_DATATYPE(MPI_REAL8, 40, float);
 CREATE_MPI_DATATYPE(MPI_REAL16, 41, double);
+CREATE_MPI_DATATYPE_NULL(MPI_DATATYPE_NULL, -1);
 CREATE_MPI_DATATYPE_NULL(MPI_COMPLEX8, 42);
 CREATE_MPI_DATATYPE_NULL(MPI_COMPLEX16, 43);
 CREATE_MPI_DATATYPE_NULL(MPI_COMPLEX32, 44);
@@ -205,9 +206,6 @@ bool Datatype::is_basic()
 
 const char* Datatype::encode(MPI_Datatype dt)
 {
-  if (dt == MPI_DATATYPE_NULL)
-    return "-1";
-
   return dt->id.c_str();
 }
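
Turning MPI_DATATYPE_NULL from a casted NULL into a real (if empty) datatype object lets encode() drop its special case: the object is presumably registered under id -1 (the macro's second argument, as for the other CREATE_MPI_DATATYPE entries), so encode() naturally yields the same "-1" string the removed branch hard-coded. User-level null tests also keep working as plain pointer comparisons, e.g.:

    if (datatype == MPI_DATATYPE_NULL) // still valid: compares against the exported singleton
      return MPI_ERR_TYPE;
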
 
diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index c082e48..5e00ce5 100644 (file)
@@ -39,7 +39,7 @@ Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
   connected_wins_[rank_] = this;
   count_                 = 0;
   if(rank_==0){
-    bar_ = MSG_barrier_init(comm_size);
+    bar_ = new simgrid::s4u::Barrier(comm_size);
   }
   mode_=0;
 
@@ -49,14 +49,14 @@ Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
   Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
                          MPI_BYTE, comm);
 
-  Colls::bcast(&(bar_), sizeof(msg_bar_t), MPI_BYTE, 0, comm);
+  Colls::bcast(&(bar_), sizeof(simgrid::s4u::Barrier*), MPI_BYTE, 0, comm);
 
   Colls::barrier(comm);
 }
 
 Win::~Win(){
   //As per the standard, perform a barrier to ensure every async comm is finished
-  MSG_barrier_wait(bar_);
+  bar_->wait();
 
   int finished = finish_comms();
   XBT_DEBUG("Win destructor - Finished %d RMA calls", finished);
@@ -76,7 +76,7 @@ Win::~Win(){
   Comm::unref(comm_);
   
   if (rank_ == 0)
-    MSG_barrier_destroy(bar_);
+    delete bar_;
   xbt_mutex_destroy(mut_);
   xbt_mutex_destroy(lock_mut_);
   xbt_mutex_destroy(atomic_mut_);
@@ -163,7 +163,7 @@ int Win::fence(int assert)
     opened_=1;
   if (assert != MPI_MODE_NOPRECEDE) {
     // This is not the first fence => finalize what came before
-    MSG_barrier_wait(bar_);
+    bar_->wait();
     xbt_mutex_acquire(mut_);
     // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
     // Without this, the vector could get redimensionned when another process pushes.
@@ -184,7 +184,7 @@ int Win::fence(int assert)
     opened_=0;
   assert_ = assert;
 
-  MSG_barrier_wait(bar_);
+  bar_->wait();
   XBT_DEBUG("Leaving fence");
 
   return MPI_SUCCESS;
@@ -629,9 +629,9 @@ int Win::unlock_all(){
   int i=0;
   int retval = MPI_SUCCESS;
   for (i=0; i<comm_->size();i++){
-      int ret = this->unlock(i);
-      if(ret != MPI_SUCCESS)
-        retval = ret;
+    int ret = this->unlock(i);
+    if (ret != MPI_SUCCESS)
+      retval = ret;
   }
   return retval;
 }
@@ -652,11 +652,9 @@ int Win::flush_local(int rank){
 }
 
 int Win::flush_all(){
-  int i=0;
-  int finished = 0;
-  finished = finish_comms();
+  int finished = finish_comms();
   XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
-  for (i=0; i<comm_->size();i++){
+  for (int i = 0; i < comm_->size(); i++) {
     finished = connected_wins_[i]->finish_comms(rank_);
     XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
   }
@@ -673,7 +671,6 @@ Win* Win::f2c(int id){
   return static_cast<Win*>(F2C::f2c(id));
 }
 
-
 int Win::finish_comms(){
   xbt_mutex_acquire(mut_);
   //Finish own requests
diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake
index 588be4a..493b62f 100644 (file)
@@ -662,6 +662,7 @@ set(headers_to_install
 
   include/simgrid_config.h
   include/simgrid/actor.h
+  include/simgrid/barrier.h
   include/simgrid/engine.h
   include/simgrid/chrono.hpp
   include/simgrid/plugins/dvfs.h
diff --git a/tools/jenkins/build.sh b/tools/jenkins/build.sh
index 264f755..d4a0007 100755 (executable)
@@ -2,7 +2,7 @@
 
 # This script is used by various build projects on Jenkins
 
-# See https://ci.inria.fr/simgrid/job/SimGrid-Multi/configure
+# See https://ci.inria.fr/simgrid/job/SimGrid/configure
 # See https://ci.inria.fr/simgrid/job/Simgrid-Windows/configure
 
 set -e
@@ -13,7 +13,7 @@ export LC_ALL=C
 echo "XXXX Cleanup previous attempts. Remaining content of /tmp:"
 rm -rf /tmp/simgrid-java*
 rm -rf /tmp/jvm-*
-find /builds/workspace/SimGrid-Multi/ -name "hs_err_pid*.log" | xargs rm -f
+find /builds/workspace/SimGrid/ -name "hs_err_pid*.log" | xargs rm -f
 ls /tmp
 df -h
 echo "XXXX Let's go"