Use them almost everywhere, for the sake of consistency.
Required by Sonar rule cpp:S967 (There shall be at most one occurrence of
the # or ## operators in a single macro definition).
#include <smpi/smpi.h>
#define AMPI_CALL(type, name, args) \
- type A##name args __attribute__((weak)); \
- type AP##name args;
+ type _XBT_CONCAT(A, name) args __attribute__((weak)); \
+ type _XBT_CONCAT(AP, name) args;
#ifndef HAVE_SMPI
// Internally disable these overrides (HAVE_SMPI is only defined when building the library)
#include <xbt/misc.h>
#ifdef _WIN32
-#define MPI_CALL(type,name,args) \
- type name args; \
- type P##name args
+#define MPI_CALL(type, name, args) \
+ type name args; \
+ type _XBT_CONCAT(P, name) args
#else
-#define MPI_CALL(type,name,args) \
- type name args __attribute__((weak)); \
- type P##name args
+#define MPI_CALL(type, name, args) \
+ type name args __attribute__((weak)); \
+ type _XBT_CONCAT(P, name) args
#endif
SG_BEGIN_DECL()
ERROR(MPI_T_ERR_PVAR_NO_ATOMIC)
#define GENERATE_ENUM(ENUM) ENUM,
-#define GENERATE_STRING(STRING) #STRING,
+#define GENERATE_STRING(STRING) _XBT_STRINGIFY(STRING),
enum ERROR_ENUM {
FOREACH_ERROR(GENERATE_ENUM)
/** Fortran binding + -fsecond-underscore **/
XBT_PUBLIC void smpi_trace_set_call_location__(const char* file, int* line);
-#define SMPI_ITER_NAME1(line) iter_count##line
+#define SMPI_ITER_NAME1(line) _XBT_CONCAT(iter_count, line)
#define SMPI_ITER_NAME(line) SMPI_ITER_NAME1(line)
#define SMPI_SAMPLE_LOOP(loop_init, loop_end, loop_iter, global, iters, thres)\
int SMPI_ITER_NAME(__LINE__)=0;\
XBT_PUBLIC int smpi_shared_known_call(const char* func, const char* input);
XBT_PUBLIC void* smpi_shared_get_call(const char* func, const char* input);
XBT_PUBLIC void* smpi_shared_set_call(const char* func, const char* input, void* data);
-#define SMPI_SHARED_CALL(func, input, ...) \
- (smpi_shared_known_call(#func, input) ? smpi_shared_get_call(#func, input) \
- : smpi_shared_set_call(#func, input, (func(__VA_ARGS__))))
+#define SMPI_SHARED_CALL(func, input, ...) \
+ (smpi_shared_known_call(_XBT_STRINGIFY(func), input) \
+ ? smpi_shared_get_call(_XBT_STRINGIFY(func), input) \
+ : smpi_shared_set_call(_XBT_STRINGIFY(func), input, (func(__VA_ARGS__))))
/* Fortran specific stuff */
# endif
#endif
+/* Stringify argument. */
+#define _XBT_STRINGIFY(a) #a
+
+/* Concatenate arguments. _XBT_CONCAT2 adds a level of indirection over _XBT_CONCAT. */
+#define _XBT_CONCAT(a, b) a##b
+#define _XBT_CONCAT2(a, b) _XBT_CONCAT(a, b)
+#define _XBT_CONCAT3(a, b, c) _XBT_CONCAT2(_XBT_CONCAT(a, b), c)
+#define _XBT_CONCAT4(a, b, c, d) _XBT_CONCAT2(_XBT_CONCAT3(a, b, c), d)
+
/*
* Expands to `one' if there is only one argument for the variadic part.
* Otherwise, expands to `more'.
#endif /* !defined(NLOG) */
/* Transforms a category name to a global variable name. */
-#define _XBT_LOGV(cat) _XBT_LOG_CONCAT(_simgrid_log_category__, cat)
-#define _XBT_LOGV_CTOR(cat) _XBT_LOG_CONCAT2(_XBT_LOGV(cat), __constructor__)
-#define _XBT_LOG_CONCAT(x, y) x ## y
-#define _XBT_LOG_CONCAT2(x, y) _XBT_LOG_CONCAT(x, y)
+#define _XBT_LOGV(cat) _XBT_CONCAT(_simgrid_log_category__, cat)
+#define _XBT_LOGV_CTOR(cat) _XBT_CONCAT2(_XBT_LOGV(cat), __constructor__)
/* The root of the category hierarchy. */
#define XBT_LOG_ROOT_CAT root
&_XBT_LOGV(parent), \
NULL /* firstChild */, \
NULL /* nextSibling */, \
- #catName, \
+ _XBT_STRINGIFY(catName), \
desc, \
0 /*initialized */, \
xbt_log_priority_uninitialized /* threshold */, \
void sglua_register_platf_functions(lua_State* L);
#define lua_ensure(...) _XBT_IF_ONE_ARG(_lua_ensure_ARG1, _lua_ensure_ARGN, __VA_ARGS__)(__VA_ARGS__)
-#define _lua_ensure_ARG1(cond) _lua_ensure_ARGN(cond, "Assertion %s failed", #cond)
+#define _lua_ensure_ARG1(cond) _lua_ensure_ARGN(cond, "Assertion %s failed", _XBT_STRINGIFY(cond))
#define _lua_ensure_ARGN(cond, ...) \
do { \
if (!(cond)) { \
type name args \
{ \
XBT_VERB("SMPI - Entering %s", __func__); \
- type ret = P##name args2; \
- if(ret!=MPI_SUCCESS) { \
+ type ret = _XBT_CONCAT(P, name) args2; \
+ if (ret != MPI_SUCCESS) { \
char error_string[MPI_MAX_ERROR_STRING]; \
int error_size; \
PMPI_Error_string(ret, error_string, &error_size); \
- XBT_WARN("%s - returned %.*s instead of MPI_SUCCESS", __func__, error_size,error_string); \
+ XBT_WARN("%s - returned %.*s instead of MPI_SUCCESS", __func__, error_size, error_string); \
} \
XBT_VERB("SMPI - Leaving %s", __func__); \
return ret; \
type name args \
{ \
XBT_VERB("SMPI - Entering %s", __func__); \
- type ret = P##name args2; \
+ type ret = _XBT_CONCAT(P, name) args2; \
XBT_VERB("SMPI - Leaving %s", __func__); \
return ret; \
}
-#define UNIMPLEMENTED_WRAPPED_PMPI_CALL(type,name,args,args2) \
-type P##name args { \
-NOT_YET_IMPLEMENTED \
-}\
-type name args { \
-return P##name args2 ; \
-}\
-
-#define UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(type,name,args,args2) \
-type P##name args { \
-NOT_YET_IMPLEMENTED_NOFAIL \
-}\
-type name args { \
-return P##name args2 ; \
-}\
+#define UNIMPLEMENTED_WRAPPED_PMPI_CALL(type, name, args, args2) \
+ type _XBT_CONCAT(P, name) args { NOT_YET_IMPLEMENTED } \
+ type name args { return _XBT_CONCAT(P, name) args2; }
+#define UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(type, name, args, args2) \
+ type _XBT_CONCAT(P, name) args { NOT_YET_IMPLEMENTED_NOFAIL } \
+ type name args { return _XBT_CONCAT(P, name) args2; }
/* MPI User level calls */
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Mprobe,(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status* status) ,(source, tag, comm, message, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Mrecv,(void *buf, int count, MPI_Datatype datatype, MPI_Message *message, MPI_Status* status),(buf, count, datatype, message, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Improbe,(int source, int tag, MPI_Comm comm, int* flag, MPI_Message *message, MPI_Status* status) ,(source, tag, comm, flag, message, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Imrecv,(void *buf, int count, MPI_Datatype datatype, MPI_Message *message, MPI_Request *request),(buf, count, datatype, message, request))
\ No newline at end of file
+UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Imrecv,(void *buf, int count, MPI_Datatype datatype, MPI_Message *message, MPI_Request *request),(buf, count, datatype, message, request))
#define TRACE_AUTO_COLL(cat) \
if (TRACE_is_enabled()) { \
simgrid::instr::EventType* type = \
- simgrid::instr::Container::get_root()->type_->by_name_or_create<simgrid::instr::EventType>(#cat); \
+ simgrid::instr::Container::get_root()->type_->by_name_or_create<simgrid::instr::EventType>( \
+ _XBT_STRINGIFY(cat)); \
\
std::string cont_name = std::string("rank-" + std::to_string(simgrid::s4u::this_actor::get_pid())); \
- type->add_entity_value(Colls::mpi_coll_##cat##_description[i].name, "1.0 1.0 1.0"); \
+ type->add_entity_value(Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name, "1.0 1.0 1.0"); \
new simgrid::instr::NewEvent(SIMIX_get_clock(), simgrid::instr::Container::by_name(cont_name), type, \
- type->get_entity_value(Colls::mpi_coll_##cat##_description[i].name)); \
+ type->get_entity_value(Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name)); \
}
#define AUTOMATIC_COLL_BENCH(cat, ret, args, args2) \
- ret Coll_##cat##_automatic::cat(COLL_UNPAREN args) \
+ ret _XBT_CONCAT3(Coll_, cat, _automatic)::cat(COLL_UNPAREN args) \
{ \
double time1, time2, time_min = DBL_MAX; \
int min_coll = -1, global_coll = -1; \
int i; \
double buf_in, buf_out, max_min = DBL_MAX; \
- for (i = 0; not Colls::mpi_coll_##cat##_description[i].name.empty(); i++) { \
- if (Colls::mpi_coll_##cat##_description[i].name == "automatic") \
+ for (i = 0; not Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name.empty(); i++) { \
+ if (Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name == "automatic") \
continue; \
- if (Colls::mpi_coll_##cat##_description[i].name == "default") \
+ if (Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].name == "default") \
continue; \
Coll_barrier_default::barrier(comm); \
TRACE_AUTO_COLL(cat) \
time1 = SIMIX_get_clock(); \
try { \
- ((int(*) args)Colls::mpi_coll_##cat##_description[i].coll) args2; \
+ ((int(*) args)Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[i].coll) args2; \
} catch (std::exception & ex) { \
continue; \
} \
} \
if (comm->rank() == 0) { \
XBT_WARN("For rank 0, the quickest was %s : %f , but global was %s : %f at max", \
- Colls::mpi_coll_##cat##_description[min_coll].name.c_str(), time_min, \
- Colls::mpi_coll_##cat##_description[global_coll].name.c_str(), max_min); \
+ Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[min_coll].name.c_str(), time_min, \
+ Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[global_coll].name.c_str(), max_min); \
} else \
- XBT_WARN("The quickest %s was %s on rank %d and took %f", #cat, \
- Colls::mpi_coll_##cat##_description[min_coll].name.c_str(), comm->rank(), time_min); \
+ XBT_WARN("The quickest " _XBT_STRINGIFY(cat) " was %s on rank %d and took %f", \
+ Colls::_XBT_CONCAT3(mpi_coll_, cat, _description)[min_coll].name.c_str(), comm->rank(), time_min); \
return (min_coll != -1) ? MPI_SUCCESS : MPI_ERR_INTERN; \
}
#define COLL_SETTER(cat, ret, args, args2) \
int(*Colls::cat) args; \
- void Colls::set_##cat(const std::string& name) \
+ void Colls::_XBT_CONCAT(set_, cat)(const std::string& name) \
{ \
- int id = find_coll_description(mpi_coll_##cat##_description, name, #cat); \
- cat = reinterpret_cast<ret(*) args>(mpi_coll_##cat##_description[id].coll); \
+ int id = find_coll_description(_XBT_CONCAT3(mpi_coll_, cat, _description), name, _XBT_STRINGIFY(cat)); \
+ cat = reinterpret_cast<ret(*) args>(_XBT_CONCAT3(mpi_coll_, cat, _description)[id].coll); \
if (cat == nullptr) \
- xbt_die("Collective " #cat " set to nullptr!"); \
+ xbt_die("Collective " _XBT_STRINGIFY(cat) " set to nullptr!"); \
}
namespace simgrid{
#define SIZECOMP_alltoallv\
size_t block_dsize = 1;
-#define IMPI_COLL_SELECT(cat, ret, args, args2)\
-ret Coll_ ## cat ## _impi:: cat (COLL_UNPAREN args)\
-{\
- int comm_size = comm->size();\
- int i =0;\
- SIZECOMP_ ## cat\
- i=0;\
- int j =0, k=0;\
- if(comm->get_leaders_comm()==MPI_COMM_NULL){\
- comm->init_smp();\
- }\
- int local_size=1;\
- if (comm->is_uniform()) {\
- local_size = comm->get_intra_comm()->size();\
- }\
- while(i < INTEL_MAX_NB_PPN &&\
- local_size!=intel_ ## cat ## _table[i].ppn)\
- i++;\
- if(i==INTEL_MAX_NB_PPN) i=0;\
- while(comm_size>intel_ ## cat ## _table[i].elems[j].max_num_proc\
- && j < INTEL_MAX_NB_THRESHOLDS)\
- j++;\
- while(block_dsize >=intel_ ## cat ## _table[i].elems[j].elems[k].max_size\
- && k< intel_ ## cat ## _table[i].elems[j].num_elems)\
- k++;\
- return (intel_ ## cat ## _functions_table[intel_ ## cat ## _table[i].elems[j].elems[k].algo-1]\
- args2);\
-}
-
+#define IMPI_COLL_SELECT(cat, ret, args, args2) \
+ ret _XBT_CONCAT3(Coll_, cat, _impi)::cat(COLL_UNPAREN args) \
+ { \
+ int comm_size = comm->size(); \
+ int i = 0; \
+ _XBT_CONCAT(SIZECOMP_, cat) \
+ i = 0; \
+ int j = 0, k = 0; \
+ if (comm->get_leaders_comm() == MPI_COMM_NULL) { \
+ comm->init_smp(); \
+ } \
+ int local_size = 1; \
+ if (comm->is_uniform()) { \
+ local_size = comm->get_intra_comm()->size(); \
+ } \
+ while (i < INTEL_MAX_NB_PPN && local_size != _XBT_CONCAT3(intel_, cat, _table)[i].ppn) \
+ i++; \
+ if (i == INTEL_MAX_NB_PPN) \
+ i = 0; \
+ while (comm_size > _XBT_CONCAT3(intel_, cat, _table)[i].elems[j].max_num_proc && j < INTEL_MAX_NB_THRESHOLDS) \
+ j++; \
+ while (block_dsize >= _XBT_CONCAT3(intel_, cat, _table)[i].elems[j].elems[k].max_size && \
+ k < _XBT_CONCAT3(intel_, cat, _table)[i].elems[j].num_elems) \
+ k++; \
+ return (_XBT_CONCAT3(intel_, cat, \
+ _functions_table)[_XBT_CONCAT3(intel_, cat, _table)[i].elems[j].elems[k].algo - 1] args2); \
+ }
COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLGATHERV_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_disps, recv_type, comm));
COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLREDUCE_SIG, (sbuf, rbuf, rcount, dtype, op, comm));
/** @brief MPI collective description */
#define COLL_DEFS(cat, ret, args, args2) \
- static void set_##cat(const std::string& name); \
- static s_mpi_coll_description_t mpi_coll_##cat##_description[]; \
+ static void _XBT_CONCAT(set_, cat)(const std::string& name); \
+ static s_mpi_coll_description_t _XBT_CONCAT3(mpi_coll_, cat, _description)[]; \
static int(*cat) args;
#define COLL_SIG(cat, ret, args, args2)\
static int cat args;
-
-#define COLL_DESCRIPTION(cat, ret, args, name) \
- {# name,\
- # cat " " # name " collective",\
- (void*) Coll_ ## cat ## _ ## name::cat }
-
-#define COLL_PROTO(cat, ret, args, name) \
-class Coll_ ## cat ## _ ## name : public Coll { \
-public: \
-static ret cat (COLL_UNPAREN args); \
-};
+#define COLL_DESCRIPTION(cat, ret, args, name) \
+ { \
+ _XBT_STRINGIFY(name) \
+ , _XBT_STRINGIFY(cat) " " _XBT_STRINGIFY(name) " collective", (void*)_XBT_CONCAT4(Coll_, cat, _, name)::cat \
+ }
+
+#define COLL_PROTO(cat, ret, args, name) \
+ class _XBT_CONCAT4(Coll_, cat, _, name) : public Coll { \
+ public: \
+ static ret cat(COLL_UNPAREN args); \
+ };
#define COLL_UNPAREN(...) __VA_ARGS__
static std::unordered_map<std::string, simgrid::smpi::Datatype*> id2type_lookup;
#define CREATE_MPI_DATATYPE(name, id, type) \
- static simgrid::smpi::Datatype mpi_##name((char*)#name, id, sizeof(type), /* size */ \
- 0, /* lb */ \
- sizeof(type), /* ub = lb + size */ \
- DT_FLAG_BASIC /* flags */ \
- ); \
- const MPI_Datatype name = &mpi_##name;
+ static simgrid::smpi::Datatype _XBT_CONCAT(mpi_, name)((char*)_XBT_STRINGIFY(name), id, sizeof(type), /* size */ \
+ 0, /* lb */ \
+ sizeof(type), /* ub = lb + size */ \
+ DT_FLAG_BASIC /* flags */ \
+ ); \
+ const MPI_Datatype name = &_XBT_CONCAT(mpi_, name);
#define CREATE_MPI_DATATYPE_NULL(name, id) \
- static simgrid::smpi::Datatype mpi_##name((char*)#name, id, 0, /* size */ \
- 0, /* lb */ \
- 0, /* ub = lb + size */ \
- DT_FLAG_BASIC /* flags */ \
- ); \
- const MPI_Datatype name = &mpi_##name;
+ static simgrid::smpi::Datatype _XBT_CONCAT(mpi_, name)((char*)_XBT_STRINGIFY(name), id, 0, /* size */ \
+ 0, /* lb */ \
+ 0, /* ub = lb + size */ \
+ DT_FLAG_BASIC /* flags */ \
+ ); \
+ const MPI_Datatype name = &_XBT_CONCAT(mpi_, name);
// Predefined data types
CREATE_MPI_DATATYPE(MPI_CHAR, 2, char);
APPLY_OP_LOOP(MPI_LONG_DOUBLE_INT, long_double_int,op)\
APPLY_OP_LOOP(MPI_2LONG, long_long,op)
-#define APPLY_END_OP_LOOP(op)\
- {\
- xbt_die("Failed to apply " #op " to type %s", (*datatype)->name());\
+#define APPLY_END_OP_LOOP(op) \
+ { \
+ xbt_die("Failed to apply " _XBT_STRINGIFY(op) " to type %s", (*datatype)->name()); \
}
static void max_func(void *a, void *b, int *length, MPI_Datatype * datatype)
/* obviously a no-op */
}
-#define CREATE_MPI_OP(name, func) \
- static SMPI_Op mpi_##name (&(func) /* func */, true, true ); \
-MPI_Op name = &mpi_##name;
+#define CREATE_MPI_OP(name, func) \
+ static SMPI_Op _XBT_CONCAT(mpi_, name)(&(func) /* func */, true, true); \
+ MPI_Op name = &_XBT_CONCAT(mpi_, name);
CREATE_MPI_OP(MPI_MAX, max_func);
CREATE_MPI_OP(MPI_MIN, min_func);
XBT_PUBLIC void model_help(const char* category, const std::vector<surf_model_description_t>& table);
#define SIMGRID_REGISTER_PLUGIN(id, desc, init) \
- static void XBT_ATTRIB_CONSTRUCTOR(800) simgrid_##id##_plugin_register() \
+ static void XBT_ATTRIB_CONSTRUCTOR(800) _XBT_CONCAT3(simgrid_, id, _plugin_register)() \
{ \
- simgrid_add_plugin_description(#id, desc, init); \
+ simgrid_add_plugin_description(_XBT_STRINGIFY(id), desc, init); \
}
XBT_PUBLIC void simgrid_add_plugin_description(const char* name, const char* description, void_f_void_t init_fun);
}
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
-#define test_types_set_mpi_2_2_integer(op,post) do { \
- op##_test##post(int8_t, MPI_INT8_T); \
- op##_test##post(int16_t, MPI_INT16_T); \
- op##_test##post(int32_t, MPI_INT32_T); \
- op##_test##post(int64_t, MPI_INT64_T); \
- op##_test##post(uint8_t, MPI_UINT8_T); \
- op##_test##post(uint16_t, MPI_UINT16_T); \
- op##_test##post(uint32_t, MPI_UINT32_T); \
- op##_test##post(uint64_t, MPI_UINT64_T); \
- op##_test##post(MPI_Aint, MPI_AINT); \
- op##_test##post(MPI_Offset, MPI_OFFSET); \
- } while (0)
+#define test_types_set_mpi_2_2_integer(op, post) \
+ do { \
+ _XBT_CONCAT3(op, _test, post)(int8_t, MPI_INT8_T); \
+ _XBT_CONCAT3(op, _test, post)(int16_t, MPI_INT16_T); \
+ _XBT_CONCAT3(op, _test, post)(int32_t, MPI_INT32_T); \
+ _XBT_CONCAT3(op, _test, post)(int64_t, MPI_INT64_T); \
+ _XBT_CONCAT3(op, _test, post)(uint8_t, MPI_UINT8_T); \
+ _XBT_CONCAT3(op, _test, post)(uint16_t, MPI_UINT16_T); \
+ _XBT_CONCAT3(op, _test, post)(uint32_t, MPI_UINT32_T); \
+ _XBT_CONCAT3(op, _test, post)(uint64_t, MPI_UINT64_T); \
+ _XBT_CONCAT3(op, _test, post)(MPI_Aint, MPI_AINT); \
+ _XBT_CONCAT3(op, _test, post)(MPI_Offset, MPI_OFFSET); \
+ } while (0)
#else
#define test_types_set_mpi_2_2_integer(op,post) do { } while (0)
#endif
#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
-#define test_types_set_mpi_3_0_integer(op,post) do { \
- op##_test##post(MPI_Count, MPI_COUNT); \
- } while (0)
+#define test_types_set_mpi_3_0_integer(op, post) \
+ do { \
+ _XBT_CONCAT3(op, _test, post)(MPI_Count, MPI_COUNT); \
+ } while (0)
#else
#define test_types_set_mpi_3_0_integer(op,post) do { } while (0)
#endif
-#define test_types_set1(op, post) \
- { \
- op##_test##post(int, MPI_INT); \
- op##_test##post(long, MPI_LONG); \
- op##_test##post(short, MPI_SHORT); \
- op##_test##post(unsigned short, MPI_UNSIGNED_SHORT); \
- op##_test##post(unsigned, MPI_UNSIGNED); \
- op##_test##post(unsigned long, MPI_UNSIGNED_LONG); \
- op##_test##post(unsigned char, MPI_UNSIGNED_CHAR); \
- test_types_set_mpi_2_2_integer(op,post); \
- test_types_set_mpi_3_0_integer(op,post); \
- }
-
-#define test_types_set2(op, post) \
- { \
- test_types_set1(op, post); \
- op##_test##post(float, MPI_FLOAT); \
- op##_test##post(double, MPI_DOUBLE); \
- }
-
-#define test_types_set3(op, post) \
- { \
- op##_test##post(unsigned char, MPI_BYTE); \
- }
+#define test_types_set1(op, post) \
+ { \
+ _XBT_CONCAT3(op, _test, post)(int, MPI_INT); \
+ _XBT_CONCAT3(op, _test, post)(long, MPI_LONG); \
+ _XBT_CONCAT3(op, _test, post)(short, MPI_SHORT); \
+ _XBT_CONCAT3(op, _test, post)(unsigned short, MPI_UNSIGNED_SHORT); \
+ _XBT_CONCAT3(op, _test, post)(unsigned, MPI_UNSIGNED); \
+ _XBT_CONCAT3(op, _test, post)(unsigned long, MPI_UNSIGNED_LONG); \
+ _XBT_CONCAT3(op, _test, post)(unsigned char, MPI_UNSIGNED_CHAR); \
+ test_types_set_mpi_2_2_integer(op, post); \
+ test_types_set_mpi_3_0_integer(op, post); \
+ }
+
+#define test_types_set2(op, post) \
+ { \
+ test_types_set1(op, post); \
+ _XBT_CONCAT3(op, _test, post)(float, MPI_FLOAT); \
+ _XBT_CONCAT3(op, _test, post)(double, MPI_DOUBLE); \
+ }
+
+#define test_types_set3(op, post) \
+ { \
+ _XBT_CONCAT3(op, _test, post)(unsigned char, MPI_BYTE); \
+ }
/* Make sure that we test complex and double complex, even if long
double complex is not available */
#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
&& defined(HAVE_DOUBLE__COMPLEX) \
&& defined(HAVE_LONG_DOUBLE__COMPLEX)
-#define test_types_set4(op, post) \
- do { \
- op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
- op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
- if (MPI_C_LONG_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) { \
- op##_test##post(long double _Complex, MPI_C_LONG_DOUBLE_COMPLEX); \
- } \
- } while (0)
+#define test_types_set4(op, post) \
+ do { \
+ _XBT_CONCAT3(op, _test, post)(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ _XBT_CONCAT3(op, _test, post)(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ if (MPI_C_LONG_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) { \
+ _XBT_CONCAT3(op, _test, post)(long double _Complex, MPI_C_LONG_DOUBLE_COMPLEX); \
+ } \
+ } while (0)
#else
#define test_types_set4(op, post) do { } while (0)
#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
&& defined(HAVE_DOUBLE__COMPLEX)
-#define test_types_set4(op, post) \
- do { \
- op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
- op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
- } while (0)
+#define test_types_set4(op, post) \
+ do { \
+ _XBT_CONCAT3(op, _test, post)(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ _XBT_CONCAT3(op, _test, post)(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ } while (0)
#else
#define test_types_set4(op, post) do { } while (0)
#endif /* defined(USE_LONG_DOUBLE_COMPLEX) */
#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE__BOOL)
-#define test_types_set5(op, post) \
- do { \
- op##_test##post(_Bool, MPI_C_BOOL); \
- } while (0)
+#define test_types_set5(op, post) \
+ do { \
+ _XBT_CONCAT3(op, _test, post)(_Bool, MPI_C_BOOL); \
+ } while (0)
#else
#define test_types_set5(op, post) do { } while (0)
namespace lmm = simgrid::kernel::lmm;
-#define PRINT_VAR(var) XBT_DEBUG(#var " = %g", (var)->get_value())
-#define SHOW_EXPR(expr) XBT_DEBUG(#expr " = %g",expr)
+#define PRINT_VAR(var) XBT_DEBUG(_XBT_STRINGIFY(var) " = %g", (var)->get_value())
+#define SHOW_EXPR(expr) XBT_DEBUG(_XBT_STRINGIFY(expr) " = %g", expr)
/* ______ */
/* ==l1== L2 ==L3== */