/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
/* Bit flags describing the kind and state of an MPI request.
 * Combined into a bitmask; several flags may be set at once.
 * (Values 0x100-0x400 are presumably used by flags declared elsewhere — not visible in this chunk.) */
constexpr unsigned MPI_REQ_NON_PERSISTENT = 0x2;    // request is not persistent
constexpr unsigned MPI_REQ_SEND           = 0x4;    // send-side request
constexpr unsigned MPI_REQ_RECV           = 0x8;    // receive-side request
constexpr unsigned MPI_REQ_PROBE          = 0x10;   // request created by a probe operation
constexpr unsigned MPI_REQ_ISEND          = 0x20;   // immediate (non-blocking) send
constexpr unsigned MPI_REQ_SSEND          = 0x40;   // synchronous send
constexpr unsigned MPI_REQ_PREPARED       = 0x80;
constexpr unsigned MPI_REQ_GENERALIZED    = 0x800;  // generalized request (MPI_Grequest)
constexpr unsigned MPI_REQ_COMPLETE       = 0x1000; // operation has completed
constexpr unsigned MPI_REQ_BSEND          = 0x2000; // buffered send
constexpr unsigned MPI_REQ_MATCHED        = 0x4000; // request has been matched with a peer
constexpr unsigned MPI_REQ_CANCELLED      = 0x8000; // request was cancelled
/** Lifecycle state of an SMPI process. */
enum class SmpiProcessState {
  UNINITIALIZED,
  INITIALIZING,
  INITIALIZED, // MPI_Init has been called
  FINALIZED
};
// Sentinel communicator value — presumably a placeholder used before initialization; see its definition site.
extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED;
-typedef SMPI_Cart_topology* MPIR_Cart_Topology;
-
-typedef SMPI_Graph_topology* MPIR_Graph_Topology;
-
-typedef SMPI_Dist_Graph_topology* MPIR_Dist_Graph_Topology;
+using MPIR_Cart_Topology = SMPI_Cart_topology*;
+using MPIR_Graph_Topology = SMPI_Graph_topology*;
+using MPIR_Dist_Graph_Topology = SMPI_Dist_Graph_topology*;
// Accessors for an actor's SMPI extension (ActorExt): the calling actor, or a given (possibly remote) actor.
XBT_PRIVATE simgrid::smpi::ActorExt* smpi_process();
XBT_PRIVATE simgrid::smpi::ActorExt* smpi_process_remote(simgrid::s4u::ActorPtr actor);
void mpi_file_close_(int* file, int* ierr);
void mpi_file_delete_(char* filename, int* info, int* ierr);
void mpi_file_open_(int* comm, char* filename, int* amode, int* info, int* fh, int* ierr);
-void mpi_file_set_view_(int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr);
+void mpi_file_seek_(int* fh, MPI_Offset* offset, int* whence, int* ierr);
+void mpi_file_seek_shared_(int* fh, MPI_Offset* offset, int* whence, int* ierr);
+void mpi_file_get_position_(int* fh, MPI_Offset* offset, int* ierr);
+void mpi_file_get_position_shared_(int* fh, MPI_Offset* offset, int* ierr);
+void mpi_file_set_view_(int* fh, MPI_Offset* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr);
+void mpi_file_get_view_(int* fh, MPI_Offset* disp, int* etype, int* filetype, char *datarep, int* ierr);
void mpi_file_read_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_shared_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_all_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_ordered_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_at_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_read_at_all_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
void mpi_file_write_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_all_(int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_shared_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_ordered_(int* fh, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_at_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
+void mpi_file_write_at_all_(int* fh, MPI_Offset* offset, void *buf, int* count, int* datatype, MPI_Status* status, int* ierr);
void smpi_init_fortran_types();
void smpi_execute_flops_(double* flops);
+void smpi_execute_flops_benched_(double* flops);
void smpi_execute_(double* duration);
void smpi_execute_benched_(double* duration);
} // extern "C"
void* address;
int file_descriptor;
};
-typedef s_smpi_privatization_region_t* smpi_privatization_region_t;
+using smpi_privatization_region_t = s_smpi_privatization_region_t*;
extern XBT_PRIVATE int smpi_loaded_page;
XBT_PRIVATE smpi_privatization_region_t smpi_init_global_memory_segment_process();
#define TOPAGE(addr) (void*)(((unsigned long)(addr) / xbt_pagesize) * xbt_pagesize)
/** Used only if PAPI is compiled in, but integrated anyway so that this file does not depend on internal_config.h (to speed builds) */
using papi_counter_t = std::vector<std::pair</* counter name */ std::string, /* counter value */ long long>>;

struct papi_process_data {
  papi_counter_t counter_data; // counter values gathered for this process
  int event_set;               // PAPI event-set handle
};

/* Maps a computation-unit name to its PAPI setup. std::less<> makes the
 * comparator transparent, enabling lookups without constructing a std::string. */
extern std::map</* computation unit name */ std::string, papi_process_data, std::less<>> units2papi_setup;

// Maps a location name to a speedup factor applied to its execution.
extern std::unordered_map<std::string, double> location2speedup;
// Account for the simulated execution of the given number of flops.
XBT_PRIVATE void private_execute_flops(double flops);
/* If `test` holds, log a warning (unless the error code is MPI_SUCCESS) and
 * return `errcode` from the enclosing function. `errcode` is evaluated exactly
 * once, so side-effecting expressions are safe. */
#define CHECK_ARGS(test, errcode, ...) \
  if (test) { \
    int error_code_ = (errcode); \
    if (error_code_ != MPI_SUCCESS) \
      XBT_WARN(__VA_ARGS__); \
    return error_code_; \
  }
#define CHECK_MPI_NULL(num, val, err, ptr) \
/* Fail with `err` when `val` is strictly negative. */
#define CHECK_NEGATIVE(num, err, val) \
  CHECK_ARGS((val) < 0, (err), \
             "%s: param %d %s cannot be negative", __func__, (num), _XBT_STRINGIFY(val));
/* Fail with `err` when `val` is negative or zero. */
#define CHECK_NEGATIVE_OR_ZERO(num, err, val) \
  CHECK_ARGS((val) <= 0, (err), \
             "%s: param %d %s cannot be negative or 0", __func__, (num), _XBT_STRINGIFY(val));
/* Fail when the communicator handle is MPI_COMM_NULL. */
#define CHECK_COMM2(num, comm) \
  CHECK_MPI_NULL((num), MPI_COMM_NULL, MPI_ERR_COMM, (comm))
/* Fail with `err` when the handle has already been freed. */
#define CHECK_DELETED(num, err, obj) \
  CHECK_ARGS((obj)->deleted(), (err), "%s: param %d %s has already been freed", __func__, (num), _XBT_STRINGIFY(obj));
/* Validate the `comm` argument in scope: non-null and not already freed. */
#define CHECK_COMM(num) \
  CHECK_COMM2((num), comm) \
  CHECK_DELETED((num), MPI_ERR_COMM, comm)
/* Fail when the `request` pointer in scope is NULL. */
#define CHECK_REQUEST(num) \
  CHECK_ARGS(request == nullptr, MPI_ERR_REQUEST, \
             "%s: param %d request cannot be NULL",__func__, (num));
/* Fail when the datatype is null or not committed/valid. */
#define CHECK_TYPE(num, datatype) \
  CHECK_ARGS(((datatype) == MPI_DATATYPE_NULL|| not (datatype)->is_valid()), MPI_ERR_TYPE, \
             "%s: param %d %s cannot be MPI_DATATYPE_NULL or invalid", __func__, (num), _XBT_STRINGIFY(datatype));
/* Validate a reduction op: non-null, and — when the op restricts its operand
 * types (allowed_types() != 0) — applicable to `type`'s flags. */
#define CHECK_OP(num, op, type) \
  CHECK_MPI_NULL((num), MPI_OP_NULL, MPI_ERR_OP, (op)) \
  CHECK_ARGS(((op)->allowed_types() && (((op)->allowed_types() & (type)->flags()) == 0)), MPI_ERR_OP, \
             "%s: param %d op %s can't be applied to type %s", __func__, (num), _XBT_STRINGIFY(op), (type)->name());
/* Fail when the `root` rank in scope is out of range for `comm`. */
#define CHECK_ROOT(num)\
  CHECK_ARGS((root < 0 || root >= comm->size()), MPI_ERR_ROOT, \
             "%s: param %d root (=%d) cannot be negative or larger than communicator size (=%d)", __func__, (num), \
             root, comm->size());
#define CHECK_INFO(num,info) \
  CHECK_MPI_NULL((num), MPI_INFO_NULL, MPI_ERR_INFO, (info))
/* Tags must be non-negative, except for the MPI_ANY_TAG wildcard. */
#define CHECK_TAG(num,tag) \
  CHECK_ARGS(((tag) < 0 && (tag) != MPI_ANY_TAG), MPI_ERR_TAG, \
             "%s: param %d %s (=%d) cannot be negative", __func__, (num), _XBT_STRINGIFY(tag), (tag));
#define CHECK_FILE(num, fh) \
  CHECK_MPI_NULL((num), MPI_FILE_NULL, MPI_ERR_FILE, (fh))
#define CHECK_OFFSET(num, offset) \
  CHECK_NEGATIVE((num), MPI_ERR_DISP, (offset))
#define CHECK_GROUP(num, group) \
  CHECK_MPI_NULL((num), MPI_GROUP_NULL, MPI_ERR_GROUP, (group))
#define CHECK_WIN(num, win) \
  CHECK_MPI_NULL((num), MPI_WIN_NULL, MPI_ERR_WIN, (win))
/* Fail when `rank` is out of range for `comm` (checked against comm->size()). */
#define CHECK_RANK(num, rank, comm) \
  CHECK_ARGS(((rank) >= (comm)->size() || (rank) <0), MPI_ERR_RANK, \
             "%s: param %d %s (=%d) cannot be < 0 or > %d", __func__, (num), _XBT_STRINGIFY(rank), \
             (rank), (comm)->size() );
/* RMA target rank: MPI_PROC_NULL is a no-op (returns MPI_SUCCESS); otherwise
 * the rank must be valid in the window's communicator. */
#define CHECK_PROC_RMA(num,proc,win) \
  CHECK_MPI_NULL((num), MPI_PROC_NULL, MPI_SUCCESS, (proc)) \
  CHECK_RANK(num, proc, win->comm())
/* MPI_IN_PLACE is only allowed where the standard permits it; these reject it
 * for the `rank`/`root` variables in scope. */
#define CHECK_NOT_IN_PLACE_ROOT(num, buf) \
  CHECK_ARGS((buf == MPI_IN_PLACE), MPI_ERR_BUFFER, \
             "%s: param %d %s cannot be MPI_IN_PLACE for rank %d with root %d", __func__, (num), _XBT_STRINGIFY(buf), \
             rank, root);
#define CHECK_NOT_IN_PLACE(num, buf) \
  CHECK_ARGS((buf == MPI_IN_PLACE), MPI_ERR_BUFFER, \
             "%s: param %d %s cannot be MPI_IN_PLACE for rank %d", __func__, (num), _XBT_STRINGIFY(buf), \
             rank);
#endif