typedef struct s_smx_simcall s_smx_simcall_t;
typedef struct s_smx_simcall* smx_simcall_t;
-typedef enum {
+typedef enum { // FIXME: move this to s4u::Link; make it an enum class
SURF_LINK_FULLDUPLEX = 2,
- SURF_LINK_SHARED = 1,
- SURF_LINK_FATPIPE = 0
+ SURF_LINK_SHARED = 1,
+ SURF_LINK_FATPIPE = 0
} e_surf_link_sharing_policy_t;
-typedef enum {
+typedef enum { // FIXME: move this away; make it an enum class
SURF_TRACE_CONNECT_KIND_HOST_AVAIL = 4,
- SURF_TRACE_CONNECT_KIND_SPEED = 3,
+ SURF_TRACE_CONNECT_KIND_SPEED = 3,
SURF_TRACE_CONNECT_KIND_LINK_AVAIL = 2,
- SURF_TRACE_CONNECT_KIND_BANDWIDTH = 1,
- SURF_TRACE_CONNECT_KIND_LATENCY = 0
+ SURF_TRACE_CONNECT_KIND_BANDWIDTH = 1,
+ SURF_TRACE_CONNECT_KIND_LATENCY = 0
} e_surf_trace_connect_kind_t;
-typedef enum {
- SURF_ACTOR_ON_FAILURE_DIE = 1,
+typedef enum { // FIXME: move this to s4u::Actor; make it an enum class
+ SURF_ACTOR_ON_FAILURE_DIE = 1,
SURF_ACTOR_ON_FAILURE_RESTART = 0
} e_surf_process_on_failure_t;
-
/** @ingroup m_datatypes_management_details
* @brief Type for any simgrid size
*/
integer MPI_COMM_NULL_DUP_FN, MPI_COMM_DUP_FN
parameter(MPI_COMM_NULL_DUP_FN =0)
parameter(MPI_COMM_DUP_FN =0)
+ integer MPI_WIN_NULL_COPY_FN, MPI_WIN_NULL_DELETE_FN
+ parameter(MPI_WIN_NULL_COPY_FN =0)
+ parameter(MPI_WIN_NULL_DELETE_FN =0)
+ integer MPI_WIN_DUP_FN
+ parameter(MPI_WIN_DUP_FN =0)
integer MPI_ROOT, MPI_COMM_TYPE_SHARED
parameter(MPI_ROOT=0)
parameter(MPI_GROUP_EMPTY=-2)
parameter(MPI_WIN_NULL=-1)
+ integer MPI_WIN_BASE, MPI_WIN_SIZE, MPI_WIN_DISP_UNIT
+
+ parameter(MPI_WIN_BASE=-1)
+ parameter(MPI_WIN_SIZE=-2)
+ parameter(MPI_WIN_DISP_UNIT=-3)
+
! These IDs have to be unique, consecutive
! and ordered as in smpi_f77.cpp.
parameter(MPI_COMM_WORLD=0)
static bool trace_configured = false;
static bool trace_active = false;
+static type_t rootType = nullptr; /* the root type */
+
instr_fmt_type_t instr_fmt_type = instr_fmt_paje;
static void TRACE_getopts()
/* destroy all data structures of tracing (and free) */
PJ_container_free_all();
- PJ_type_free_all();
+ recursiveDestroyType (PJ_type_get_root());
PJ_container_release();
- PJ_type_release();
+ rootType = nullptr;
xbt_dict_free(&user_link_variables);
xbt_dict_free(&user_host_variables);
{
XBT_DEBUG("%s not implemented for %p: event_type=%d, timestamp=%f", __FUNCTION__,
this, (int)event_type, timestamp);
-// xbt_backtrace_display_current();
-
- /* switch (event->event_type){
- case PAJE_StartLink:
- xbt_free (((startLink_t)(event->data))->value);
- xbt_free (((startLink_t)(event->data))->key);
- break;
- case PAJE_EndLink:
- xbt_free (((endLink_t)(event->data))->value);
- xbt_free (((endLink_t)(event->data))->key);
- break;
- default:
- break;
- }*/
}
void TRACE_paje_start() {
static type_t rootType = nullptr; /* the root type */
-void PJ_type_alloc ()
-{
-}
-
void PJ_type_release ()
{
rootType = nullptr;
}
-
type_t PJ_type_get_root ()
{
return rootType;
char *value_name;
xbt_dict_cursor_t cursor = nullptr;
xbt_dict_foreach(type->values, cursor, value_name, value) {
- PJ_value_free (value);
+ XBT_DEBUG("free value %s, child of %s", value->name, value->father->name);
+ xbt_free(value->name);
+ xbt_free(value->color);
+ xbt_free(value->id);
+ xbt_free(value);
}
xbt_dict_free (&type->values);
xbt_free (type->name);
type = nullptr;
}
-static void recursiveDestroyType (type_t type)
+void recursiveDestroyType (type_t type)
{
XBT_DEBUG("recursiveDestroyType %s", type->name);
xbt_dict_cursor_t cursor = nullptr;
PJ_type_free(type);
}
-void PJ_type_free_all ()
-{
- recursiveDestroyType (PJ_type_get_root());
- rootType = nullptr;
-}
-
type_t PJ_type_get (const char *name, type_t father)
{
type_t ret = PJ_type_get_or_null (name, father);
}
return ret;
}
-
-void PJ_value_free (val_t value)
-{
- XBT_DEBUG("free value %s, child of %s", value->name, value->father->name);
- xbt_free(((val_t)value)->name);
- xbt_free(((val_t)value)->color);
- xbt_free(((val_t)value)->id);
- xbt_free(value);
-}
XBT_PUBLIC(void) PJ_container_remove_from_parent (container_t container);
/* instr_paje_types.c */
-XBT_PRIVATE void PJ_type_alloc ();
XBT_PRIVATE void PJ_type_release ();
XBT_PUBLIC(type_t) PJ_type_get_root ();
XBT_PRIVATE type_t PJ_type_container_new (const char *name, type_t father);
XBT_PRIVATE type_t PJ_type_state_new (const char *name, type_t father);
XBT_PUBLIC(type_t) PJ_type_get (const char *name, const type_t father);
XBT_PUBLIC(type_t) PJ_type_get_or_null (const char *name, type_t father);
-void PJ_type_free_all ();
-XBT_PRIVATE XBT_PRIVATE void PJ_type_free (type_t type);
+XBT_PRIVATE XBT_PRIVATE void PJ_type_free (type_t type);
+
+/* instr_config.c */
+XBT_PRIVATE void recursiveDestroyType (type_t type);
/* instr_paje_values.c */
XBT_PUBLIC(val_t) PJ_value_new (const char *name, const char *color, type_t father);
XBT_PUBLIC(val_t) PJ_value_get_or_new (const char *name, const char *color, type_t father);
XBT_PUBLIC(val_t) PJ_value_get (const char *name, const type_t father);
-XBT_PRIVATE void PJ_value_free (val_t value);
XBT_PRIVATE void TRACE_TI_start();
XBT_PRIVATE void TRACE_TI_end();
void wait_for_requests()
{
#if SIMGRID_HAVE_MC
- xbt_assert(mc_model_checker == nullptr);
+ xbt_assert(mc_model_checker == nullptr, "This must be called from the client");
#endif
smx_actor_t process;
- smx_simcall_t req;
unsigned int iter;
while (not xbt_dynar_is_empty(simix_global->process_to_run)) {
SIMIX_process_runall();
xbt_dynar_foreach(simix_global->process_that_ran, iter, process) {
- req = &process->simcall;
+ smx_simcall_t req = &process->simcall;
if (req->call != SIMCALL_NONE && not simgrid::mc::request_is_visible(req))
SIMIX_simcall_handle(req, 0);
}
// Called from both MCer and MCed:
bool actor_is_enabled(smx_actor_t actor)
{
+#if SIMGRID_HAVE_MC
+ // If in the MCer, ask the client app since it has all the data
+ if (mc_model_checker != nullptr) {
+ return mc_model_checker->process().actor_is_enabled(actor->pid);
+ }
+#endif
+
+ // Now, we are in the client app, no need for remote memory reading.
smx_simcall_t req = &actor->simcall;
- // TODO, add support for the subtypes?
switch (req->call) {
case SIMCALL_NONE:
simgrid::kernel::activity::CommImpl* act =
static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
-#if SIMGRID_HAVE_MC
- // Fetch from MCed memory:
- // HACK, type puning
- if (mc_model_checker != nullptr) {
- simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
- mc_model_checker->process().read(temp_comm, remote(act));
- act = static_cast<simgrid::kernel::activity::CommImpl*>(temp_comm.getBuffer());
- }
-#endif
-
if (act->src_timeout || act->dst_timeout) {
/* If it has a timeout it will be always be enabled (regardless of who declared the timeout),
* because even if the communication is not ready, it can timeout and won't block. */
simgrid::kernel::activity::CommImpl* act =
static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
-#if SIMGRID_HAVE_MC
- s_xbt_dynar_t comms_buffer;
- size_t buffer_size = 0;
- if (mc_model_checker != nullptr) {
- // Read dynar:
- mc_model_checker->process().read(&comms_buffer, remote(simcall_comm_waitany__get__comms(req)));
- assert(comms_buffer.elmsize == sizeof(act));
- buffer_size = comms_buffer.elmsize * comms_buffer.used;
- comms = &comms_buffer;
- } else
- comms = simcall_comm_waitany__get__comms(req);
-
- // Read all the dynar buffer:
- char buffer[buffer_size];
- if (mc_model_checker != nullptr)
- mc_model_checker->process().read_bytes(buffer, sizeof(buffer), remote(comms->data));
-#else
comms = simcall_comm_waitany__get__comms(req);
-#endif
for (unsigned int index = 0; index < comms->used; ++index) {
-#if SIMGRID_HAVE_MC
- // Fetch act from MCed memory:
- // HACK, type puning
- simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
- if (mc_model_checker != nullptr) {
- memcpy(&act, buffer + comms->elmsize * index, sizeof(act));
- mc_model_checker->process().read(temp_comm, remote(act));
- act = static_cast<simgrid::kernel::activity::CommImpl*>(temp_comm.getBuffer());
- } else
-#endif
- act = xbt_dynar_get_as(comms, index, simgrid::kernel::activity::CommImpl*);
+ act = xbt_dynar_get_as(comms, index, simgrid::kernel::activity::CommImpl*);
if (act->src_proc && act->dst_proc)
return true;
}
case SIMCALL_MUTEX_LOCK: {
smx_mutex_t mutex = simcall_mutex_lock__get__mutex(req);
-#if SIMGRID_HAVE_MC
- simgrid::mc::Remote<simgrid::simix::MutexImpl> temp_mutex;
- if (mc_model_checker != nullptr) {
- mc_model_checker->process().read(temp_mutex.getBuffer(), remote(mutex));
- mutex = temp_mutex.getBuffer();
- }
-#endif
if (mutex->owner == nullptr)
return true;
-#if SIMGRID_HAVE_MC
- else if (mc_model_checker != nullptr) {
- simgrid::mc::RemoteClient& modelchecked = mc_model_checker->process();
- // TODO, *(mutex->owner) :/
- return modelchecked.resolveActor(simgrid::mc::remote(mutex->owner))->pid ==
- modelchecked.resolveActor(simgrid::mc::remote(req->issuer))->pid;
- }
-#endif
else
return mutex->owner->pid == req->issuer->pid;
}
}
}
+/* This is the list of requests that are visible from the checker algorithm.
+ * Any other requests are handled right away on the application side.
+ */
bool request_is_visible(smx_simcall_t req)
{
+#if SIMGRID_HAVE_MC
+ xbt_assert(mc_model_checker == nullptr, "This should be called from the client side");
+#endif
+
return req->call == SIMCALL_COMM_ISEND
|| req->call == SIMCALL_COMM_IRECV
|| req->call == SIMCALL_COMM_WAIT
*/
XBT_PRIVATE bool actor_is_enabled(smx_actor_t process);
-/** Check if the given simcall is visible
- *
- * \return `TRUE` or `FALSE`
- */
+/** Check if the given simcall is visible */
XBT_PRIVATE bool request_is_visible(smx_simcall_t req);
}
smpi_really_switch_data_segment(message->index);
#endif
}
+void Client::handleActorEnabled(s_mc_message_actor_enabled_t* msg)
+{
+ bool res = simgrid::mc::actor_is_enabled(SIMIX_process_from_PID(msg->aid));
+ s_mc_message_int answer{MC_MESSAGE_ACTOR_ENABLED_REPLY, res};
+ channel_.send(answer);
+}
void Client::handleMessages()
{
handleRestore((s_mc_message_restore_t*)message_buffer);
break;
+ case MC_MESSAGE_ACTOR_ENABLED:
+ xbt_assert(received_size == sizeof(s_mc_message_actor_enabled_t),
+ "Unexpected size for ACTOR_ENABLED (%zu != %zu)", received_size,
+ sizeof(s_mc_message_actor_enabled_t));
+ handleActorEnabled((s_mc_message_actor_enabled_t*)message_buffer);
+ break;
+
default:
xbt_die("Received unexpected message %s (%i)", MC_message_type_name(message->type), message->type);
break;
void handleContinue(mc_message_t* msg);
void handleSimcall(s_mc_message_simcall_handle_t* message);
void handleRestore(s_mc_message_restore_t* msg);
+ void handleActorEnabled(s_mc_message_actor_enabled_t* msg);
public:
Channel const& getChannel() const { return channel_; }
unw_destroy_addr_space(as);
return;
}
+
+bool RemoteClient::actor_is_enabled(aid_t pid)
+{
+ s_mc_message_actor_enabled msg{MC_MESSAGE_ACTOR_ENABLED, pid};
+ process()->getChannel().send(msg);
+ char buff[MC_MESSAGE_LENGTH];
+ ssize_t received = process()->getChannel().receive(buff, MC_MESSAGE_LENGTH, true);
+ xbt_assert(received == sizeof(s_mc_message_int), "Unexpected size in answer to ACTOR_ENABLED");
+ return ((mc_message_int_t*)buff)->value;
+}
}
}
/** The corresponding context
*/
void* unw_underlying_context;
+
+ /* Check whether the given actor is enabled */
+ bool actor_is_enabled(aid_t pid);
};
/** Open a FD to a remote process memory (`/dev/$pid/mem`)
return "SIMCALL_HANDLE";
case MC_MESSAGE_ASSERTION_FAILED:
return "ASSERTION_FAILED";
+
+ case MC_MESSAGE_ACTOR_ENABLED:
+ return "ACTOR_ENABLED";
+ case MC_MESSAGE_ACTOR_ENABLED_REPLY:
+ return "ACTOR_ENABLED_REPLY";
+
default:
return "?";
}
#include <xbt/base.h>
#include "mc/datatypes.h"
+#include "simgrid/forward.h"
SG_BEGIN_DECL()
MC_MESSAGE_ASSERTION_FAILED,
// MCer request to finish the restoration:
MC_MESSAGE_RESTORE,
+ MC_MESSAGE_ACTOR_ENABLED,
+ MC_MESSAGE_ACTOR_ENABLED_REPLY
} e_mc_message_type;
#define MC_MESSAGE_LENGTH 512
};
typedef struct s_mc_message_restore s_mc_message_restore_t;
+struct s_mc_message_actor_enabled {
+ e_mc_message_type type;
+ aid_t aid; // actor ID
+};
+typedef struct s_mc_message_actor_enabled s_mc_message_actor_enabled_t;
+
XBT_PRIVATE const char* MC_message_type_name(e_mc_message_type type);
SG_END_DECL()
static int running_processes = 0;
-#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__)
-typedef int integer;
-#else
-typedef long int integer;
-#endif
-
-/* Convert between Fortran and C */
-
-#define FORT_BOTTOM(addr) ((*(int*)addr) == -200 ? MPI_BOTTOM : (void*)addr)
-#define FORT_IN_PLACE(addr) ((*(int*)addr) == -100 ? MPI_IN_PLACE : (void*)addr)
-#define FORT_STATUS_IGNORE(addr) (static_cast<MPI_Status*>((*(int*)addr) == -300 ? MPI_STATUS_IGNORE : (void*)addr))
-#define FORT_STATUSES_IGNORE(addr) (static_cast<MPI_Status*>((*(int*)addr) == -400 ? MPI_STATUSES_IGNORE : (void*)addr))
-
-#define KEY_SIZE (sizeof(int) * 2 + 1)
-
-static char* get_key(char* key, int id) {
- snprintf(key, KEY_SIZE, "%x",id);
- return key;
-}
-
-static char* get_key_id(char* key, int id) {
- snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process()->index());
- return key;
-}
-
static void smpi_init_fortran_types(){
if(simgrid::smpi::F2C::lookup() == nullptr){
MPI_COMM_WORLD->add_f();
*ierr = MPI_Abort(simgrid::smpi::Comm::f2c(*comm), *errorcode);
}
-void mpi_comm_rank_(int* comm, int* rank, int* ierr) {
- *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank);
-}
-
-void mpi_comm_size_(int* comm, int* size, int* ierr) {
- *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size);
-}
-
double mpi_wtime_() {
return MPI_Wtime();
}
return MPI_Wtick();
}
-void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) {
- MPI_Comm tmp;
-
- *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
-void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) {
- MPI_Comm tmp;
-
- *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
-void mpi_comm_free_(int* comm, int* ierr) {
- MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
-
- *ierr = MPI_Comm_free(&tmp);
-
- if(*ierr == MPI_SUCCESS) {
- simgrid::smpi::Comm::free_f(*comm);
- }
-}
-
-void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) {
- MPI_Comm tmp;
-
- *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp);
- if(*ierr == MPI_SUCCESS) {
- *comm_out = tmp->add_f();
- }
-}
-
void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) {
MPI_Group tmp;
}
}
-void mpi_comm_group_(int* comm, int* group_out, int* ierr) {
- MPI_Group tmp;
-
- *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *group_out = tmp->c2f();
- }
-}
-
void mpi_initialized_(int* flag, int* ierr){
*ierr = MPI_Initialized(flag);
}
-void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request req;
- buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
- if(*ierr == MPI_SUCCESS) {
- *request = req->add_f();
- }
-}
-
-void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request req;
- buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
- if(*ierr == MPI_SUCCESS) {
- *request = req->add_f();
- }
-}
-
-void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request req;
- buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
- if(*ierr == MPI_SUCCESS) {
- *request = req->add_f();
- }
-}
-
-void mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
- buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
- buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount,
- int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) {
- sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
- recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount,
- simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
-}
-
-void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request req;
- buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
- if(*ierr == MPI_SUCCESS) {
- *request = req->add_f();
- }
-}
-
-void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request req;
- buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
- if(*ierr == MPI_SUCCESS) {
- *request = req->add_f();
- }
-}
-
-void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) {
- buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), status);
-}
-
-void mpi_start_(int* request, int* ierr) {
- MPI_Request req = simgrid::smpi::Request::f2c(*request);
-
- *ierr = MPI_Start(&req);
-}
-
-void mpi_startall_(int* count, int* requests, int* ierr) {
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *count);
- for(i = 0; i < *count; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr = MPI_Startall(*count, reqs);
- xbt_free(reqs);
-}
-
-void mpi_wait_(int* request, MPI_Status* status, int* ierr) {
- MPI_Request req = simgrid::smpi::Request::f2c(*request);
-
- *ierr = MPI_Wait(&req, FORT_STATUS_IGNORE(status));
- if(req==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(*request);
- *request=MPI_FORTRAN_REQUEST_NULL;
- }
-}
-
-void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *count);
- for(i = 0; i < *count; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr = MPI_Waitany(*count, reqs, index, status);
- if(reqs[*index]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[*index]);
- requests[*index]=MPI_FORTRAN_REQUEST_NULL;
- }
- xbt_free(reqs);
-}
-
-void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) {
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *count);
- for(i = 0; i < *count; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status));
- for(i = 0; i < *count; i++) {
- if(reqs[i]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[i]);
- requests[i]=MPI_FORTRAN_REQUEST_NULL;
- }
- }
-
- xbt_free(reqs);
-}
-
-void mpi_barrier_(int* comm, int* ierr) {
- *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
- *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
- recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype),
- simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
- int* root, int* comm, int* ierr) {
- recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
- *ierr = MPI_Scatter(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype,
- void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) {
- recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
- *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
- int* root, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
- recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype,
- void* recvbuf, int* recvcounts, int* displs, int* recvtype, int* root, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
- recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
- int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype,
- void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) {
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
- *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype),
- simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype,
- void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
- *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype,
- void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) {
- *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype),
- recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_test_ (int * request, int *flag, MPI_Status * status, int* ierr){
- MPI_Request req = simgrid::smpi::Request::f2c(*request);
- *ierr= MPI_Test(&req, flag, FORT_STATUS_IGNORE(status));
- if(req==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(*request);
- *request=MPI_FORTRAN_REQUEST_NULL;
- }
-}
-
-void mpi_testall_ (int* count, int * requests, int *flag, MPI_Status * statuses, int* ierr){
- int i;
- MPI_Request* reqs = xbt_new(MPI_Request, *count);
- for(i = 0; i < *count; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr= MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses));
- for(i = 0; i < *count; i++) {
- if(reqs[i]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[i]);
- requests[i]=MPI_FORTRAN_REQUEST_NULL;
- }
- }
- xbt_free(reqs);
-}
-
void mpi_get_processor_name_(char *name, int *resultlen, int* ierr){
*ierr = MPI_Get_processor_name(name, resultlen);
}
*ierr = MPI_Attr_get(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value, flag);
}
-void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){
- *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
-}
-
-void mpi_type_commit_(int* datatype, int* ierr){
- MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
- *ierr= MPI_Type_commit(&tmp);
-}
-
-void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr= MPI_Type_hvector (*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr= MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_free_(int* datatype, int* ierr){
- MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
- *ierr= MPI_Type_free (&tmp);
- if(*ierr == MPI_SUCCESS) {
- simgrid::smpi::F2C::free_f(*datatype);
- }
-}
-
-void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){
- *ierr= MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp);
-}
-
-void mpi_type_lb_(int* datatype, MPI_Aint * extent, int* ierr){
- *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
-}
-
-void mpi_type_size_(int* datatype, int *size, int* ierr)
-{
- *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size);
-}
-
void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){
*ierr = MPI_Error_string(*errorcode, string, resultlen);
}
name[*len]=' ';//blank padding, not \0
}
+void mpi_win_allocate_( MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr){
+ MPI_Win tmp;
+ *ierr = MPI_Win_allocate( *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),static_cast<void*>(base),&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *win = tmp->add_f();
+ }
+}
+
+void mpi_win_attach_(int* win, int* base, MPI_Aint* size, int* ierr){
+ *ierr = MPI_Win_attach(simgrid::smpi::Win::f2c(*win), static_cast<void*>(base), *size);
+}
+
+void mpi_win_create_dynamic_( int* info, int* comm, int *win, int* ierr){
+ MPI_Win tmp;
+ *ierr = MPI_Win_create_dynamic( simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *win = tmp->add_f();
+ }
+}
+
+void mpi_win_detach_(int* win, int* base, int* ierr){
+ *ierr = MPI_Win_detach(simgrid::smpi::Win::f2c(*win), static_cast<void*>(base));
+}
+
+void mpi_win_set_info_(int* win, int* info, int* ierr){
+ *ierr = MPI_Win_set_info(simgrid::smpi::Win::f2c(*win), simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_win_get_info_(int* win, int* info, int* ierr){
+ MPI_Info tmp;
+ *ierr = MPI_Win_get_info(simgrid::smpi::Win::f2c(*win), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *info = tmp->add_f();
+ }
+}
+
+void mpi_win_get_group_(int* win, int* group, int* ierr){
+ MPI_Group tmp;
+ *ierr = MPI_Win_get_group(simgrid::smpi::Win::f2c(*win), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *group = tmp->add_f();
+ }
+}
+
+void mpi_win_get_attr_(int* win, int* type_keyval, void* attribute_val, int* flag, int* ierr){
+ *ierr = MPI_Win_get_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, attribute_val, flag);
+}
+
+void mpi_win_set_attr_(int* win, int* type_keyval, void* att, int* ierr){
+ *ierr = MPI_Win_set_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, att);
+}
+
+void mpi_win_delete_attr_(int* win, int* comm_keyval, int* ierr){
+ *ierr = MPI_Win_delete_attr (simgrid::smpi::Win::f2c(*win), *comm_keyval);
+}
+
+void mpi_win_create_keyval_(void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+ *ierr = MPI_Win_create_keyval(reinterpret_cast<MPI_Win_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Win_delete_attr_function*>(delete_fn),
+ keyval, extra_state) ;
+}
+
+void mpi_win_free_keyval_(int* keyval, int* ierr){
+ *ierr = MPI_Win_free_keyval( keyval);
+}
+
+void mpi_win_lock_(int* lock_type, int* rank, int* assert, int* win, int* ierr){
+ *ierr = MPI_Win_lock(*lock_type, *rank, *assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_lock_all_(int* assert, int* win, int* ierr){
+ *ierr = MPI_Win_lock_all(*assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_unlock_(int* rank, int* win, int* ierr){
+ *ierr = MPI_Win_unlock(*rank, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_unlock_all_(int* win, int* ierr){
+ *ierr = MPI_Win_unlock_all(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_(int* rank, int* win, int* ierr){
+ *ierr = MPI_Win_flush(*rank, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_local_(int* rank, int* win, int* ierr){
+ *ierr = MPI_Win_flush_local(*rank, simgrid::smpi::Win::f2c(*win));
+}
+void mpi_win_flush_all_(int* win, int* ierr){
+ *ierr = MPI_Win_flush_all(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_local_all_(int* win, int* ierr){
+ *ierr = MPI_Win_flush_local_all(simgrid::smpi::Win::f2c(*win));
+}
+
void mpi_info_create_( int *info, int* ierr){
MPI_Info tmp;
*ierr = MPI_Info_create(&tmp);
*target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
}
+void mpi_rget_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+ MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* request, int* ierr){
+ MPI_Request req;
+ *ierr = MPI_Rget( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* ierr){
*ierr = MPI_Accumulate( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
*target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
}
+void mpi_raccumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+ MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* request, int* ierr){
+ MPI_Request req;
+ *ierr = MPI_Raccumulate( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win),&req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){
*ierr = MPI_Put( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
*target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
}
+void mpi_rput_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+ MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* request, int* ierr){
+ MPI_Request req;
+ *ierr = MPI_Rput( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win),&req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_fetch_and_op_( int *origin_addr, int* result_addr, int* datatype, int* target_rank, MPI_Aint* target_disp, int* op, int* win, int* ierr){
+ *ierr = MPI_Fetch_and_op( static_cast<void *>(origin_addr),
+ static_cast<void *>(result_addr), simgrid::smpi::Datatype::f2c(*datatype),*target_rank,
+ *target_disp, simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_compare_and_swap_( int *origin_addr, int* compare_addr, int* result_addr,
+ int* datatype, int* target_rank, MPI_Aint* target_disp, int* win, int* ierr){
+ *ierr = MPI_Compare_and_swap( static_cast<void *>(origin_addr),static_cast<void *>(compare_addr),
+ static_cast<void *>(result_addr), simgrid::smpi::Datatype::f2c(*datatype),*target_rank,
+ *target_disp, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_get_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+ int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+ int* target_datatype, int* op, int* win, int* ierr){
+ *ierr = MPI_Get_accumulate( static_cast<void *>(origin_addr), *origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),
+ static_cast<void *>(result_addr), *result_count, simgrid::smpi::Datatype::f2c(*result_datatype),
+ *target_rank, *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*target_datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_rget_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+ int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+ int* target_datatype, int* op, int* win, int* request, int* ierr){
+ MPI_Request req;
+ *ierr = MPI_Rget_accumulate( static_cast<void *>(origin_addr), *origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),
+ static_cast<void *>(result_addr), *result_count, simgrid::smpi::Datatype::f2c(*result_datatype),
+ *target_rank, *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*target_datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
//following are automatically generated, and have to be checked
void mpi_finalized_ (int * flag, int* ierr){
*ierr = MPI_Get_address(location, address);
}
-void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newdatatype = tmp->add_f();
- }
-}
-
-void mpi_type_set_name_ (int* datatype, char * name, int* ierr, int size){
- char* tname = xbt_new(char, size+1);
- strncpy(tname, name, size);
- tname[size]='\0';
- *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname);
- xbt_free(tname);
-}
-
-void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){
- *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype),name,len);
- if(*len>0)
- name[*len]=' ';
-}
-
-void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){
-
- *ierr = MPI_Type_get_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val,flag);
-}
-
-void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){
-
- *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val);
-}
-
-void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){
-
- *ierr = MPI_Type_delete_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval);
-}
-
-void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
-
- *ierr = MPI_Type_create_keyval(reinterpret_cast<MPI_Type_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Type_delete_attr_function*>(delete_fn),
- keyval, extra_state) ;
-}
-
-void mpi_type_free_keyval_ (int* keyval, int* ierr) {
- *ierr = MPI_Type_free_keyval( keyval);
-}
-
void mpi_pcontrol_ (int* level , int* ierr){
*ierr = MPI_Pcontrol(*static_cast<const int*>(level));
}
-void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
-
- *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
-}
-
-void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
-
- *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
-}
-
void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){
MPI_Op tmp;
*ierr = MPI_Op_create(reinterpret_cast<MPI_User_function*>(function),*commute, &tmp);
}
}
+void mpi_op_commutative_ (int* op, int* commute, int* ierr){
+ *ierr = MPI_Op_commutative(simgrid::smpi::Op::f2c(*op), commute);
+}
+
void mpi_group_free_ (int* group, int* ierr){
MPI_Group tmp = simgrid::smpi::Group::f2c(*group);
*ierr = MPI_Group_free(&tmp);
}
}
-void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){
-
- *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, flag);
-}
-
-void mpi_comm_set_attr_ (int* comm, int* comm_keyval, void *attribute_val, int* ierr){
-
- *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val);
-}
-
-void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){
-
- *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval);
-}
-
-void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
-
- *ierr = MPI_Comm_create_keyval(reinterpret_cast<MPI_Comm_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Comm_delete_attr_function*>(delete_fn),
- keyval, extra_state) ;
-}
-
-void mpi_comm_free_keyval_ (int* keyval, int* ierr) {
- *ierr = MPI_Comm_free_keyval( keyval);
-}
-
-void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){
- *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len);
- if(*len>0)
- name[*len]=' ';
-}
-
-void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){
-
- *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result);
-}
-
-void mpi_comm_disconnect_ (int* comm, int* ierr){
- MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
- *ierr = MPI_Comm_disconnect(&tmp);
- if(*ierr == MPI_SUCCESS) {
- simgrid::smpi::Comm::free_f(*comm);
- }
-}
-
void mpi_request_free_ (int* request, int* ierr){
MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Request_free(&tmp);
}
}
-void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag,
- int* comm, MPI_Status* status, int* ierr)
-{
- *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src,
- *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
-}
-
-void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr)
-{
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *count);
- for(i = 0; i < *count; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status));
- if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[*index]);
- requests[*index]=MPI_FORTRAN_REQUEST_NULL;
- }
- xbt_free(reqs);
-}
-
-void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MPI_Status* status, int* ierr)
-{
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *incount);
- for(i = 0; i < *incount; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- }
- *ierr = MPI_Waitsome(*incount, reqs, outcount, indices, status);
- for(i=0;i<*outcount;i++){
- if(reqs[indices[i]]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[indices[i]]);
- requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
- }
- }
- xbt_free(reqs);
-}
-
-void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* count, int* datatype, int* op, int* ierr){
-
- *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op));
-}
-
-void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm,
- int* ierr)
-{
- sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op),
- simgrid::smpi::Comm::f2c(*comm));
-}
-
void mpi_pack_size_ (int* incount, int* datatype, int* comm, int* size, int* ierr) {
*ierr = MPI_Pack_size(*incount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Comm::f2c(*comm), size);
}
*ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
}
-void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
-}
-
-void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
-}
-
-void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) {
- MPI_Datatype tmp;
- *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
void mpi_cancel_ (int* request, int* ierr) {
MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Cancel(&tmp);
*ierr = MPI_Buffer_detach(buffer, size);
}
-void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, MPI_Status* statuses, int* ierr) {
- MPI_Request* reqs;
- int i;
-
- reqs = xbt_new(MPI_Request, *incount);
- for(i = 0; i < *incount; i++) {
- reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
- indices[i]=0;
- }
- *ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses));
- for(i=0;i<*incount;i++){
- if(indices[i] && reqs[indices[i]]==MPI_REQUEST_NULL){
- simgrid::smpi::Request::free_f(requests[indices[i]]);
- requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
- }
- }
- xbt_free(reqs);
-}
-
-void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) {
- *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag);
-}
-
-void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm,
- int* ierr) {
- *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){
- *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size);
-}
-
-void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount,
- MPI_Aint *position, int* ierr){
- *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position);
-}
-
-void mpi_unpack_external_ ( char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf,
- int* outcount, int* datatype, int* ierr){
- *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype));
-}
-
-void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) {
- MPI_Datatype tmp;
- *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype,
- int* ierr) {
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) {
- MPI_Datatype tmp;
- *ierr = MPI_Type_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype,
- int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) {
- MPI_Datatype tmp;
- int i=0;
- MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
- for(i=0; i< *count; i++){
- types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
- }
- *ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
- xbt_free(types);
-}
-
-void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr){
- MPI_Datatype tmp;
- int i=0;
- MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
- for(i=0; i< *count; i++){
- types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
- }
- *ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
- xbt_free(types);
-}
-
-void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) {
- *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
-}
-void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request tmp;
- *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *request = tmp->add_f();
- }
-}
void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, int* remote_leader, int* tag,
int* comm_out, int* ierr) {
}
}
-void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) {
- *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request tmp;
- *ierr = MPI_Bsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *request = tmp->add_f();
- }
-}
-
-void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request tmp;
- *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *request = tmp->add_f();
- }
-}
-
-void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) {
- MPI_Group tmp;
- *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *group = tmp->c2f();
- }
-}
-
-void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) {
- *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size);
-}
-
-void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request tmp;
- *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *request = tmp->add_f();
- }
-}
-
-void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) {
- *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
-}
-
void mpi_attr_delete_ (int* comm, int* keyval, int* ierr) {
*ierr = MPI_Attr_delete(simgrid::smpi::Comm::f2c(*comm), *keyval);
}
*ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value);
}
-void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
- MPI_Request tmp;
- *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *request = tmp->add_f();
- }
-}
-
void mpi_keyval_create_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr) {
*ierr = MPI_Keyval_create(reinterpret_cast<MPI_Copy_function*>(copy_fn),reinterpret_cast<MPI_Delete_function*>(delete_fn), keyval, extra_state);
}
*ierr = MPI_Test_cancelled(status, flag);
}
-void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) {
- *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm));
-}
-
void mpi_get_elements_ (MPI_Status* status, int* datatype, int* elements, int* ierr) {
*ierr = MPI_Get_elements(status, simgrid::smpi::Datatype::f2c(*datatype), elements);
}
*ierr = MPI_Dims_create(*nnodes, *ndims, dims);
}
-void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) {
- *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, status);
-}
-
-void mpi_type_get_envelope_ ( int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner,
- int* ierr){
- *ierr = MPI_Type_get_envelope( simgrid::smpi::Datatype::f2c(*datatype), num_integers,
- num_addresses, num_datatypes, combiner);
-}
-
-void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes,
- int* array_of_integers, MPI_Aint* array_of_addresses,
- int* array_of_datatypes, int* ierr){
- *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes,
- array_of_integers, array_of_addresses, reinterpret_cast<MPI_Datatype*>(array_of_datatypes));
-}
-
-void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gsizes, int* array_of_distribs,
- int* array_of_dargs, int* array_of_psizes,
- int* order, int* oldtype, int*newtype, int* ierr) {
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes,
- array_of_distribs, array_of_dargs, array_of_psizes,
- *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp) ;
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_resized_ (int* oldtype,MPI_Aint* lb, MPI_Aint* extent, int*newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype),*lb, *extent, &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_create_subarray_ (int* ndims,int *array_of_sizes, int *array_of_subsizes, int *array_of_starts,
- int* order, int* oldtype, int*newtype, int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_create_subarray(*ndims,array_of_sizes, array_of_subsizes, array_of_starts, *order,
- simgrid::smpi::Datatype::f2c(*oldtype), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newtype = tmp->add_f();
- }
-}
-
-void mpi_type_match_size_ (int* typeclass,int* size,int* datatype, int* ierr){
- MPI_Datatype tmp;
- *ierr = MPI_Type_match_size(*typeclass,*size,&tmp);
- if(*ierr == MPI_SUCCESS) {
- *datatype = tmp->c2f();
- }
-}
-
-void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts,
- int *rdispls, int* recvtypes, int* comm, int* ierr){
- *ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast<MPI_Datatype*>(sendtypes), recvbuf, recvcnts, rdispls,
- reinterpret_cast<MPI_Datatype*>(recvtypes), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){
- *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
-}
-
-void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){
- char* tname = xbt_new(char, size+1);
- strncpy(tname, name, size);
- tname[size]='\0';
- *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname);
- xbt_free(tname);
-}
-
-void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
-void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
-void mpi_comm_set_info_ (int* comm, int* info, int* ierr){
- *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info));
-}
-
-void mpi_comm_get_info_ (int* comm, int* info, int* ierr){
- MPI_Info tmp;
- *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr==MPI_SUCCESS){
- *info = tmp->c2f();
- }
-}
-
-void mpi_comm_create_errhandler_ ( void *function, void *errhandler, int* ierr){
- *ierr = MPI_Comm_create_errhandler( reinterpret_cast<MPI_Comm_errhandler_fn*>(function), static_cast<MPI_Errhandler*>(errhandler));
-}
-
void mpi_add_error_class_ ( int *errorclass, int* ierr){
*ierr = MPI_Add_error_class( errorclass);
}
*ierr = MPI_Add_error_string(*errorcode, string);
}
-void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){
- *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode);
-}
-
void mpi_info_dup_ (int* info, int* newinfo, int* ierr){
MPI_Info tmp;
*ierr = MPI_Info_dup(simgrid::smpi::Info::f2c(*info), &tmp);
*ierr = MPI_Status_set_elements( status, simgrid::smpi::Datatype::f2c(*datatype), *count);
}
-void mpi_comm_connect_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
void mpi_publish_name_ ( char *service_name, int* info, char *port_name, int* ierr){
*ierr = MPI_Publish_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name);
}
*ierr = MPI_Lookup_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name);
}
-void mpi_comm_join_ ( int* fd, int* intercomm, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_join( *fd, &tmp);
- if(*ierr == MPI_SUCCESS) {
- *intercomm = tmp->add_f();
- }
-}
-
void mpi_open_port_ ( int* info, char *port_name, int* ierr){
*ierr = MPI_Open_port( *reinterpret_cast<MPI_Info*>(info),port_name);
}
*ierr = MPI_Close_port( port_name);
}
-void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
- if(*ierr == MPI_SUCCESS) {
- *newcomm = tmp->add_f();
- }
-}
-
-void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm,
- int* array_of_errcodes, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp,
- array_of_errcodes);
- if(*ierr == MPI_SUCCESS) {
- *intercomm = tmp->add_f();
- }
-}
-
-void mpi_comm_spawn_multiple_ ( int* count, char *array_of_commands, char** array_of_argv, int* array_of_maxprocs,
- int* array_of_info, int* root,
- int* comm, int* intercomm, int* array_of_errcodes, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_spawn_multiple(* count, &array_of_commands, &array_of_argv, array_of_maxprocs,
- reinterpret_cast<MPI_Info*>(array_of_info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes);
- if(*ierr == MPI_SUCCESS) {
- *intercomm = tmp->add_f();
- }
-}
-
-void mpi_comm_get_parent_ ( int* parent, int* ierr){
- MPI_Comm tmp;
- *ierr = MPI_Comm_get_parent( &tmp);
- if(*ierr == MPI_SUCCESS) {
- *parent = tmp->c2f();
- }
-}
-
void mpi_file_close_ ( int* file, int* ierr){
*ierr= MPI_File_close(reinterpret_cast<MPI_File*>(*file));
}
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_op.hpp"
+#include "smpi_coll.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_barrier_(int* comm, int* ierr) {
+ *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
+ *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
+ recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
+ *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+ int* root, int* comm, int* ierr) {
+ recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
+ *ierr = MPI_Scatter(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype,
+ void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) {
+ recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
+ *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+ int* root, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
+ recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
+ *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype,
+ void* recvbuf, int* recvcounts, int* displs, int* recvtype, int* root, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
+ recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
+ *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+ int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype,
+ void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
+ *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype,
+ void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
+ *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype,
+ void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) {
+ *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* count, int* datatype, int* op, int* ierr){
+
+ *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op));
+}
+
+void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm,
+ int* ierr)
+{
+ sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
+ *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op),
+ simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts,
+ int *rdispls, int* recvtypes, int* comm, int* ierr){
+ *ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast<MPI_Datatype*>(sendtypes), recvbuf, recvcnts, rdispls,
+ reinterpret_cast<MPI_Datatype*>(recvtypes), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){
+ *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+}
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_info.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_comm_rank_(int* comm, int* rank, int* ierr) {
+ *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank);
+}
+
+void mpi_comm_size_(int* comm, int* size, int* ierr) {
+ *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size);
+}
+
+void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) {
+ MPI_Comm tmp;
+
+ *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) {
+ MPI_Comm tmp;
+
+ *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_free_(int* comm, int* ierr) {
+ MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
+
+ *ierr = MPI_Comm_free(&tmp);
+
+ if(*ierr == MPI_SUCCESS) {
+ simgrid::smpi::Comm::free_f(*comm);
+ }
+}
+
+void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) {
+ MPI_Comm tmp;
+
+ *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *comm_out = tmp->add_f();
+ }
+}
+
+void mpi_comm_group_(int* comm, int* group_out, int* ierr) {
+ MPI_Group tmp;
+
+ *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *group_out = tmp->c2f();
+ }
+}
+
+void mpi_comm_create_group_ (int* comm, int* group, int i, int* comm_out, int* ierr){
+ // Create a communicator from a subgroup without a collective over 'comm'.
+ // NOTE(review): 'i' is the tag, but it is taken BY VALUE although Fortran
+ // passes every argument by reference — this likely should be 'int* tag'
+ // dereferenced below; confirm against the Fortran call sites before changing
+ // the ABI. Also note sibling wrappers register new communicators with
+ // add_f() rather than c2f() — verify which registration is intended here.
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_create_group(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), i, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *comm_out = tmp->c2f();
+ }
+}
+
+void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){
+
+ *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, flag);
+}
+
+void mpi_comm_set_attr_ (int* comm, int* comm_keyval, void *attribute_val, int* ierr){
+
+ *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val);
+}
+
+void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){
+
+ *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval);
+}
+
+void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+
+ *ierr = MPI_Comm_create_keyval(reinterpret_cast<MPI_Comm_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Comm_delete_attr_function*>(delete_fn),
+ keyval, extra_state) ;
+}
+
+void mpi_comm_free_keyval_ (int* keyval, int* ierr) {
+ *ierr = MPI_Comm_free_keyval( keyval);
+}
+
+void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){
+ // Fetch the communicator name. Fortran strings are blank-padded rather than
+ // NUL-terminated, so the terminator written by the C call is replaced by a
+ // blank.
+ // NOTE(review): only one blank is written; Fortran convention pads the whole
+ // remainder of the buffer, and name[*len] may land one past the caller's
+ // buffer when the name fills it exactly — confirm the hidden length argument.
+ *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len);
+ if(*len>0)
+ name[*len]=' ';
+}
+
+void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){
+
+ *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result);
+}
+
+void mpi_comm_disconnect_ (int* comm, int* ierr){
+ MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
+ *ierr = MPI_Comm_disconnect(&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ simgrid::smpi::Comm::free_f(*comm);
+ }
+}
+
+void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) {
+ *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) {
+ // Query (not set!) the error handler attached to the communicator. The
+ // previous code mistakenly called MPI_Errhandler_set here, clobbering the
+ // handler instead of returning it through 'errhandler'.
+ *ierr = MPI_Errhandler_get(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) {
+ *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag);
+}
+
+void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) {
+ MPI_Group tmp;
+ *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *group = tmp->c2f();
+ }
+}
+
+void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) {
+ *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size);
+}
+
+void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){
+ char* tname = xbt_new(char, size+1);
+ strncpy(tname, name, size);
+ tname[size]='\0';
+ *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname);
+ xbt_free(tname);
+}
+
+void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_set_info_ (int* comm, int* info, int* ierr){
+ *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_comm_get_info_ (int* comm, int* info, int* ierr){
+ MPI_Info tmp;
+ *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr==MPI_SUCCESS){
+ *info = tmp->c2f();
+ }
+}
+
+void mpi_comm_create_errhandler_ ( void *function, void *errhandler, int* ierr){
+ *ierr = MPI_Comm_create_errhandler( reinterpret_cast<MPI_Comm_errhandler_fn*>(function), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){
+ *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode);
+}
+
+void mpi_comm_connect_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_join_ ( int* fd, int* intercomm, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_join( *fd, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *intercomm = tmp->add_f();
+ }
+}
+
+
+void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newcomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm,
+ int* array_of_errcodes, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp,
+ array_of_errcodes);
+ if(*ierr == MPI_SUCCESS) {
+ *intercomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_spawn_multiple_ ( int* count, char *array_of_commands, char** array_of_argv, int* array_of_maxprocs,
+ int* array_of_info, int* root,
+ int* comm, int* intercomm, int* array_of_errcodes, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_spawn_multiple(* count, &array_of_commands, &array_of_argv, array_of_maxprocs,
+ reinterpret_cast<MPI_Info*>(array_of_info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes);
+ if(*ierr == MPI_SUCCESS) {
+ *intercomm = tmp->add_f();
+ }
+}
+
+void mpi_comm_get_parent_ ( int* parent, int* ierr){
+ MPI_Comm tmp;
+ *ierr = MPI_Comm_get_parent( &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *parent = tmp->c2f();
+ }
+}
+
+}
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_request.hpp"
+
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request req;
+ buf = static_cast<char *>(FORT_BOTTOM(buf));
+ *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request req;
+ buf = static_cast<char *>(FORT_BOTTOM(buf));
+ *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request req;
+ buf = static_cast<char *>(FORT_BOTTOM(buf));
+ *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
+ buf = static_cast<char *>(FORT_BOTTOM(buf));
+ *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
+ buf = static_cast<char *>(FORT_BOTTOM(buf));
+ *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount,
+ int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) {
+ sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
+ recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
+ *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount,
+ simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request req;
+ buf = static_cast<char *>( FORT_BOTTOM(buf));
+ *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request req;
+ buf = static_cast<char *>( FORT_BOTTOM(buf));
+ *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+ if(*ierr == MPI_SUCCESS) {
+ *request = req->add_f();
+ }
+}
+
+void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) {
+ // Blocking receive. Translate MPI_BOTTOM and — like mpi_sendrecv_ and
+ // mpi_wait_ — map the Fortran MPI_STATUS_IGNORE sentinel; passing the raw
+ // pointer would make the C layer write through the sentinel address.
+ buf = static_cast<char *>( FORT_BOTTOM(buf));
+ *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag,
+ int* comm, MPI_Status* status, int* ierr)
+{
+ *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src,
+ *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) {
+ *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request tmp;
+ *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *request = tmp->add_f();
+ }
+}
+
+void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) {
+ *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request tmp;
+ *ierr = MPI_Bsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *request = tmp->add_f();
+ }
+}
+
+void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request tmp;
+ *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *request = tmp->add_f();
+ }
+}
+
+void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request tmp;
+ *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *request = tmp->add_f();
+ }
+}
+
+void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+ MPI_Request tmp;
+ *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *request = tmp->add_f();
+ }
+}
+
+void mpi_start_(int* request, int* ierr) {
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
+
+ *ierr = MPI_Start(&req);
+}
+
+void mpi_startall_(int* count, int* requests, int* ierr) {
+ MPI_Request* reqs;
+ int i;
+
+ reqs = xbt_new(MPI_Request, *count);
+ for(i = 0; i < *count; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Startall(*count, reqs);
+ xbt_free(reqs);
+}
+
+void mpi_wait_(int* request, MPI_Status* status, int* ierr) {
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
+
+ *ierr = MPI_Wait(&req, FORT_STATUS_IGNORE(status));
+ if(req==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(*request);
+ *request=MPI_FORTRAN_REQUEST_NULL;
+ }
+}
+
+void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
+ // Wait for any one of the Fortran request handles, then release the handle
+ // of the completed request. Fixes two issues: the Fortran MPI_STATUS_IGNORE
+ // sentinel is now honored (as in mpi_wait_), and reqs[*index] is no longer
+ // dereferenced when MPI_Waitany returns MPI_UNDEFINED (all requests null or
+ // inactive) — mpi_testany_ below already has this guard.
+ MPI_Request* reqs = xbt_new(MPI_Request, *count);
+ for(int i = 0; i < *count; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Waitany(*count, reqs, index, FORT_STATUS_IGNORE(status));
+ if(*index != MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[*index]);
+ requests[*index]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ xbt_free(reqs);
+}
+
+void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) {
+ MPI_Request* reqs;
+ int i;
+
+ reqs = xbt_new(MPI_Request, *count);
+ for(i = 0; i < *count; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status));
+ for(i = 0; i < *count; i++) {
+ if(reqs[i]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[i]);
+ requests[i]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ }
+
+ xbt_free(reqs);
+}
+
+void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MPI_Status* status, int* ierr)
+{
+ // Wait until at least one request completes, then release the handles of the
+ // completed requests. MPI_Waitsome takes an ARRAY of statuses, so the Fortran
+ // MPI_STATUSES_IGNORE sentinel must be mapped with FORT_STATUSES_IGNORE
+ // (the raw pointer was passed before, as in mpi_waitall_'s fixed pattern).
+ MPI_Request* reqs = xbt_new(MPI_Request, *incount);
+ for(int i = 0; i < *incount; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Waitsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(status));
+ for(int i = 0; i < *outcount; i++){
+ if(reqs[indices[i]]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[indices[i]]);
+ requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ }
+ xbt_free(reqs);
+}
+
+void mpi_test_ (int * request, int *flag, MPI_Status * status, int* ierr){
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
+ *ierr= MPI_Test(&req, flag, FORT_STATUS_IGNORE(status));
+ if(req==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(*request);
+ *request=MPI_FORTRAN_REQUEST_NULL;
+ }
+}
+
+void mpi_testall_ (int* count, int * requests, int *flag, MPI_Status * statuses, int* ierr){
+ int i;
+ MPI_Request* reqs = xbt_new(MPI_Request, *count);
+ for(i = 0; i < *count; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr= MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses));
+ for(i = 0; i < *count; i++) {
+ if(reqs[i]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[i]);
+ requests[i]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ }
+ xbt_free(reqs);
+}
+
+void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr)
+{
+ MPI_Request* reqs;
+ int i;
+
+ reqs = xbt_new(MPI_Request, *count);
+ for(i = 0; i < *count; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status));
+ if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[*index]);
+ requests[*index]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ xbt_free(reqs);
+}
+
+void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, MPI_Status* statuses, int* ierr) {
+ // Test which requests completed and release their Fortran handles.
+ // The cleanup loop now iterates the first *outcount entries of 'indices'
+ // (the only ones MPI_Testsome fills), mirroring mpi_waitsome_. The previous
+ // code scanned all *incount entries and additionally skipped index 0 via an
+ // 'indices[i]' truthiness test, although 0 is a valid request index.
+ MPI_Request* reqs = xbt_new(MPI_Request, *incount);
+ for(int i = 0; i < *incount; i++) {
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+ }
+ *ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses));
+ for(int i = 0; i < *outcount; i++){
+ if(reqs[indices[i]]==MPI_REQUEST_NULL){
+ simgrid::smpi::Request::free_f(requests[indices[i]]);
+ requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
+ }
+ }
+ xbt_free(reqs);
+}
+
+void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) {
+ *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+
+void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) {
+ // Non-blocking probe. Map the Fortran MPI_STATUS_IGNORE sentinel exactly as
+ // mpi_probe_ above does; the raw pointer was passed before, which makes the
+ // C layer write a status through the sentinel address.
+ *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, FORT_STATUS_IGNORE(status));
+}
+
+}
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){
+ *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
+}
+
+void mpi_type_free_(int* datatype, int* ierr){
+ MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
+ *ierr= MPI_Type_free (&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ simgrid::smpi::F2C::free_f(*datatype);
+ }
+}
+
+void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){
+ *ierr= MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp);
+}
+
+void mpi_type_lb_(int* datatype, MPI_Aint * disp, int* ierr){
+ // Return the lower bound of the datatype. The previous code wrongly called
+ // MPI_Type_extent here, so callers got the extent instead of the lower bound
+ // (compare with mpi_type_ub_ just above, which correctly calls MPI_Type_ub).
+ *ierr= MPI_Type_lb(simgrid::smpi::Datatype::f2c(*datatype), disp);
+}
+
+void mpi_type_size_(int* datatype, int *size, int* ierr)
+{
+ *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size);
+}
+
+void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newdatatype = tmp->add_f();
+ }
+}
+
+void mpi_type_set_name_ (int* datatype, char * name, int* ierr, int size){
+ char* tname = xbt_new(char, size+1);
+ strncpy(tname, name, size);
+ tname[size]='\0';
+ *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname);
+ xbt_free(tname);
+}
+
+void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){
+ *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype),name,len);
+ if(*len>0)
+ name[*len]=' ';
+}
+
+void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){
+
+ *ierr = MPI_Type_get_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val,flag);
+}
+
+void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){
+
+ *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val);
+}
+
+void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){
+
+ *ierr = MPI_Type_delete_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval);
+}
+
+void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+
+ *ierr = MPI_Type_create_keyval(reinterpret_cast<MPI_Type_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Type_delete_attr_function*>(delete_fn),
+ keyval, extra_state) ;
+}
+
+void mpi_type_free_keyval_ (int* keyval, int* ierr) {
+ *ierr = MPI_Type_free_keyval( keyval);
+}
+
+void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
+
+ *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
+}
+
+void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
+
+ *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
+}
+
+void mpi_type_commit_(int* datatype, int* ierr){
+ MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
+ *ierr= MPI_Type_commit(&tmp);
+}
+
+void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) {
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr= MPI_Type_hvector (*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
+ // Wrapper for the MPI-2 call; it previously fell through to the deprecated
+ // MPI_Type_hvector (same semantics, but inconsistent with the wrapper name
+ // and with the other mpi_type_create_* wrappers in this file).
+ MPI_Datatype tmp;
+ *ierr= MPI_Type_create_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) {
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype,
+ int* ierr) {
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) {
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_create_indexed_(int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype,
+ int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+// Fortran binding for the (MPI-1, deprecated) MPI_Type_struct.  Each entry of
+// old_types is a Fortran handle, so the whole array must be converted to C
+// handles in a temporary buffer before calling the C API.  The temporary is
+// freed on every path; on success the new type gets a Fortran handle.
+void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) {
+ MPI_Datatype tmp;
+ int i=0;
+ MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
+ for(i=0; i< *count; i++){
+ types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
+ }
+ *ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+ xbt_free(types);
+}
+
+// MPI-2 spelling of the struct constructor; identical handle-conversion dance.
+void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr){
+ MPI_Datatype tmp;
+ int i=0;
+ MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
+ for(i=0; i< *count; i++){
+ types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
+ }
+ *ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+ xbt_free(types);
+}
+
+// Fortran bindings for the pack/unpack family: pure pass-throughs that only
+// dereference the Fortran by-reference scalars and convert datatype and
+// communicator handles with f2c().  'position' is passed as a pointer because
+// the C API updates it in place.
+void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) {
+ *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm,
+ int* ierr) {
+ *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm));
+}
+
+// External (canonical "external32") packing variants; datarep names the
+// representation and is forwarded untouched.
+void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){
+ *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size);
+}
+
+void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount,
+ MPI_Aint *position, int* ierr){
+ *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position);
+}
+
+void mpi_unpack_external_ ( char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf,
+ int* outcount, int* datatype, int* ierr){
+ *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype));
+}
+
+
+// Fortran binding for MPI_Type_get_envelope: all outputs are plain ints, so a
+// direct pass-through after converting the datatype handle suffices.
+void mpi_type_get_envelope_ ( int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner,
+ int* ierr){
+ *ierr = MPI_Type_get_envelope( simgrid::smpi::Datatype::f2c(*datatype), num_integers,
+ num_addresses, num_datatypes, combiner);
+}
+
+// Fortran binding for MPI_Type_get_contents.
+// NOTE(review): array_of_datatypes is reinterpret_cast from int* to
+// MPI_Datatype*.  This is only safe if MPI_Datatype is pointer-sized equal to
+// int on this build, which is not true in general; the returned C handles are
+// presumably meant to be converted to Fortran handles entry by entry — verify.
+void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes,
+ int* array_of_integers, MPI_Aint* array_of_addresses,
+ int* array_of_datatypes, int* ierr){
+ *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes,
+ array_of_integers, array_of_addresses, reinterpret_cast<MPI_Datatype*>(array_of_datatypes));
+}
+
+// Fortran bindings for the remaining derived-datatype constructors.  Same
+// pattern as above: f2c() the input handle, call the C API, and on success
+// publish a Fortran handle for the result.
+void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gsizes, int* array_of_distribs,
+ int* array_of_dargs, int* array_of_psizes,
+ int* order, int* oldtype, int*newtype, int* ierr) {
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes,
+ array_of_distribs, array_of_dargs, array_of_psizes,
+ *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp) ;
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+// Changes only lb/extent of an existing type.
+void mpi_type_create_resized_ (int* oldtype,MPI_Aint* lb, MPI_Aint* extent, int*newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype),*lb, *extent, &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+// N-dimensional subarray constructor.
+void mpi_type_create_subarray_ (int* ndims,int *array_of_sizes, int *array_of_subsizes, int *array_of_starts,
+ int* order, int* oldtype, int*newtype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_create_subarray(*ndims,array_of_sizes, array_of_subsizes, array_of_starts, *order,
+ simgrid::smpi::Datatype::f2c(*oldtype), &tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *newtype = tmp->add_f();
+ }
+}
+
+// Looks up the predefined type of a given class and size; c2f() (not add_f())
+// because predefined types already own a Fortran handle.
+void mpi_type_match_size_ (int* typeclass,int* size,int* datatype, int* ierr){
+ MPI_Datatype tmp;
+ *ierr = MPI_Type_match_size(*typeclass,*size,&tmp);
+ if(*ierr == MPI_SUCCESS) {
+ *datatype = tmp->c2f();
+ }
+}
+
+
+}
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <climits>
-
#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/Host.hpp"
#include "private.h"
#include "smpi_comm.hpp"
-#include "smpi_coll.hpp"
#include "smpi_datatype_derived.hpp"
-#include "smpi_op.hpp"
#include "smpi_process.hpp"
-#include "smpi_request.hpp"
#include "smpi_status.hpp"
-#include "smpi_win.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)");
return PMPI_Address(location, address);
}
-int PMPI_Type_free(MPI_Datatype * datatype)
-{
- /* Free a predefined datatype is an error according to the standard, and should be checked for */
- if (*datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_ARG;
- } else {
- simgrid::smpi::Datatype::unref(*datatype);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_size(MPI_Datatype datatype, int *size)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (size == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *size = static_cast<int>(datatype->size());
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (size == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *size = static_cast<MPI_Count>(datatype->size());
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (lb == nullptr || extent == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return datatype->extent(lb, extent);
- }
-}
-
-int PMPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
+// Return the current host's name in 'name' (caller provides a buffer of at
+// least MPI_MAX_PROCESSOR_NAME chars per the MPI standard) and its length in
+// *resultlen.  Always NUL-terminates: the previous strncpy-with-computed-length
+// form left 'name' unterminated when truncating, so the following strlen(name)
+// read past the end of the buffer (undefined behavior), and its
+// "> MPI_MAX_PROCESSOR_NAME" clamp on resultlen was unreachable.
+int PMPI_Get_processor_name(char *name, int *resultlen)
{
- return PMPI_Type_get_extent(datatype, lb, extent);
-}
+ strncpy(name, sg_host_self()->getCname(), MPI_MAX_PROCESSOR_NAME - 1);
+ name[MPI_MAX_PROCESSOR_NAME - 1] = '\0';
+ *resultlen = strlen(name);
-int PMPI_Type_extent(MPI_Datatype datatype, MPI_Aint * extent)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (extent == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *extent = datatype->get_extent();
- return MPI_SUCCESS;
- }
+ return MPI_SUCCESS;
}
-int PMPI_Type_lb(MPI_Datatype datatype, MPI_Aint * disp)
+int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (disp == nullptr) {
+ if (status == nullptr || count == nullptr) {
return MPI_ERR_ARG;
- } else {
- *disp = datatype->lb();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_ub(MPI_Datatype datatype, MPI_Aint * disp)
-{
- if (datatype == MPI_DATATYPE_NULL) {
+ } else if (not datatype->is_valid()) {
return MPI_ERR_TYPE;
- } else if (disp == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *disp = datatype->ub();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
- int retval = MPI_SUCCESS;
- if (datatype == MPI_DATATYPE_NULL) {
- retval=MPI_ERR_TYPE;
- } else {
- *newtype = new simgrid::smpi::Datatype(datatype, &retval);
- //error when duplicating, free the new datatype
- if(retval!=MPI_SUCCESS){
- simgrid::smpi::Datatype::unref(*newtype);
- *newtype = MPI_DATATYPE_NULL;
- }
- }
- return retval;
-}
-
-int PMPI_Op_create(MPI_User_function * function, int commute, MPI_Op * op)
-{
- if (function == nullptr || op == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *op = new simgrid::smpi::Op(function, (commute!=0));
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Op_free(MPI_Op * op)
-{
- if (op == nullptr) {
- return MPI_ERR_ARG;
- } else if (*op == MPI_OP_NULL) {
- return MPI_ERR_OP;
- } else {
- delete (*op);
- *op = MPI_OP_NULL;
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Op_commutative(MPI_Op op, int* commute){
- if (op == MPI_OP_NULL) {
- return MPI_ERR_OP;
- } else if (commute==nullptr){
- return MPI_ERR_ARG;
- } else {
- *commute = op->is_commutative();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_free(MPI_Group * group)
-{
- if (group == nullptr) {
- return MPI_ERR_ARG;
- } else {
- if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY)
- simgrid::smpi::Group::unref(*group);
- *group = MPI_GROUP_NULL;
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_size(MPI_Group group, int *size)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (size == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *size = group->size();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_rank(MPI_Group group, int *rank)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (rank == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *rank = group->rank(smpi_process()->index());
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, MPI_Group group2, int *ranks2)
-{
- if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else {
- for (int i = 0; i < n; i++) {
- if(ranks1[i]==MPI_PROC_NULL){
- ranks2[i]=MPI_PROC_NULL;
- }else{
- int index = group1->index(ranks1[i]);
- ranks2[i] = group2->rank(index);
- }
- }
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result)
-{
- if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (result == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *result = group1->compare(group2);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
-{
-
- if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return group1->group_union(group2, newgroup);
- }
-}
-
-int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
-{
-
- if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return group1->intersection(group2,newgroup);
- }
-}
-
-int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
-{
- if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return group1->difference(group2,newgroup);
- }
-}
-
-int PMPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return group->incl(n, ranks, newgroup);
- }
-}
-
-int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- if (n == 0) {
- *newgroup = group;
- if (group != MPI_COMM_WORLD->group()
- && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY)
- group->ref();
- return MPI_SUCCESS;
- } else if (n == group->size()) {
- *newgroup = MPI_GROUP_EMPTY;
- return MPI_SUCCESS;
- } else {
- return group->excl(n,ranks,newgroup);
- }
- }
-}
-
-int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
} else {
- if (n == 0) {
- *newgroup = MPI_GROUP_EMPTY;
+ size_t size = datatype->size();
+ if (size == 0) {
+ *count = 0;
return MPI_SUCCESS;
+ } else if (status->count % size != 0) {
+ return MPI_UNDEFINED;
} else {
- return group->range_incl(n,ranges,newgroup);
- }
- }
-}
-
-int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup)
-{
- if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newgroup == nullptr) {
- return MPI_ERR_ARG;
- } else {
- if (n == 0) {
- *newgroup = group;
- if (group != MPI_COMM_WORLD->group() && group != MPI_COMM_SELF->group() &&
- group != MPI_GROUP_EMPTY)
- group->ref();
+ *count = simgrid::smpi::Status::get_count(status, datatype);
return MPI_SUCCESS;
- } else {
- return group->range_excl(n,ranges,newgroup);
- }
- }
-}
-
-int PMPI_Comm_rank(MPI_Comm comm, int *rank)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (rank == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *rank = comm->rank();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Comm_size(MPI_Comm comm, int *size)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (size == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *size = comm->size();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Comm_get_name (MPI_Comm comm, char* name, int* len)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (name == nullptr || len == nullptr) {
- return MPI_ERR_ARG;
- } else {
- comm->get_name(name, len);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Comm_group(MPI_Comm comm, MPI_Group * group)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (group == nullptr) {
- return MPI_ERR_ARG;
- } else {
- *group = comm->group();
- if (*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY)
- (*group)->ref();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result)
-{
- if (comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (result == nullptr) {
- return MPI_ERR_ARG;
- } else {
- if (comm1 == comm2) { /* Same communicators means same groups */
- *result = MPI_IDENT;
- } else {
- *result = comm1->group()->compare(comm2->group());
- if (*result == MPI_IDENT) {
- *result = MPI_CONGRUENT;
- }
}
- return MPI_SUCCESS;
}
}
-int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm * newcomm)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (newcomm == nullptr) {
- return MPI_ERR_ARG;
- } else {
- return comm->dup(newcomm);
- }
+// Set *flag to true iff this process has a live SMPI process descriptor whose
+// initialized() predicate holds (i.e. MPI_Init has completed here).
+int PMPI_Initialized(int* flag) {
+ *flag=(smpi_process()!=nullptr && smpi_process()->initialized());
+ return MPI_SUCCESS;
}
-int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm)
-{
- if (comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else if (group == MPI_GROUP_NULL) {
- return MPI_ERR_GROUP;
- } else if (newcomm == nullptr) {
- return MPI_ERR_ARG;
- } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
- *newcomm= MPI_COMM_NULL;
- return MPI_SUCCESS;
- }else{
- group->ref();
- *newcomm = new simgrid::smpi::Comm(group, nullptr);
+// Allocate 'size' bytes and store the pointer through baseptr (which, per the
+// MPI convention, is really a void** despite the void* signature).  The info
+// argument is ignored.
+// NOTE(review): xbt_malloc presumably aborts on OOM rather than returning
+// nullptr, which would make the MPI_ERR_NO_MEM branch dead — confirm.
+int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
+ void *ptr = xbt_malloc(size);
+ if(ptr==nullptr)
+ return MPI_ERR_NO_MEM;
+ else {
+ *static_cast<void**>(baseptr) = ptr;
 return MPI_SUCCESS;
 }
}
-int PMPI_Comm_free(MPI_Comm * comm)
-{
- if (comm == nullptr) {
- return MPI_ERR_ARG;
- } else if (*comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else {
- simgrid::smpi::Comm::destroy(*comm);
- *comm = MPI_COMM_NULL;
- return MPI_SUCCESS;
- }
+// Release memory obtained from PMPI_Alloc_mem.  Note the asymmetry with
+// Alloc_mem: here baseptr IS the pointer to free, not a pointer to it.
+int PMPI_Free_mem(void *baseptr){
+ xbt_free(baseptr);
+ return MPI_SUCCESS;
}
-int PMPI_Comm_disconnect(MPI_Comm * comm)
-{
- /* TODO: wait until all communication in comm are done */
- if (comm == nullptr) {
- return MPI_ERR_ARG;
- } else if (*comm == MPI_COMM_NULL) {
- return MPI_ERR_COMM;
- } else {
- simgrid::smpi::Comm::destroy(*comm);
- *comm = MPI_COMM_NULL;
- return MPI_SUCCESS;
- }
+// Map an error code to its error class.  SMPI only ever produces standard MPI
+// error codes (no implementation-specific subcodes), so the class equals the
+// code itself.
+int PMPI_Error_class(int errorcode, int* errorclass) {
+ // assume smpi uses only standard mpi error codes
+ *errorclass=errorcode;
+ return MPI_SUCCESS;
}
-int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out)
-{
- int retval = 0;
- smpi_bench_end();
-
- if (comm_out == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else {
- *comm_out = comm->split(color, key);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
-
- return retval;
-}
-
-int PMPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int, MPI_Comm* comm_out)
-{
- int retval = 0;
- smpi_bench_end();
-
- if (comm_out == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else {
- retval = MPI_Comm_create(comm, group, comm_out);
- }
- smpi_bench_begin();
-
- return retval;
-}
-
-int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (dst == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else {
- *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request != nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (src == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else {
- *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request != nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-int PMPI_Ssend_init(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (dst == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else {
- *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request != nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-int PMPI_Start(MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr || *request == MPI_REQUEST_NULL) {
- retval = MPI_ERR_REQUEST;
- } else {
- (*request)->start();
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Startall(int count, MPI_Request * requests)
-{
- int retval;
- smpi_bench_end();
- if (requests == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- retval = MPI_SUCCESS;
- for (int i = 0; i < count; i++) {
- if(requests[i] == MPI_REQUEST_NULL) {
- retval = MPI_ERR_REQUEST;
- }
- }
- if(retval != MPI_ERR_REQUEST) {
- simgrid::smpi::Request::startall(count, requests);
- }
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Request_free(MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (*request == MPI_REQUEST_NULL) {
- retval = MPI_ERR_ARG;
- } else {
- simgrid::smpi::Request::unref(request);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (src == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
- retval = MPI_ERR_RANK;
- } else if ((count < 0) || (buf==nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag<0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
-
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int src_traced = comm->group()->index(src);
-
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_IRECV;
- extra->src = src_traced;
- extra->dst = rank;
- int known=0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if(known==0)
- dt_size_send = datatype->size();
- extra->send_size = count*dt_size_send;
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
-
- *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
-
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request != nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-
-int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (dst == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (dst >= comm->group()->size() || dst <0){
- retval = MPI_ERR_RANK;
- } else if ((count < 0) || (buf==nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag<0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int dst_traced = comm->group()->index(dst);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_ISEND;
- extra->src = rank;
- extra->dst = dst_traced;
- int known=0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if(known==0)
- dt_size_send = datatype->size();
- extra->send_size = count*dt_size_send;
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
-
- *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request!=nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (dst == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (dst >= comm->group()->size() || dst <0){
- retval = MPI_ERR_RANK;
- } else if ((count < 0)|| (buf==nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag<0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int dst_traced = comm->group()->index(dst);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_ISSEND;
- extra->src = rank;
- extra->dst = dst_traced;
- int known=0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if(known==0)
- dt_size_send = datatype->size();
- extra->send_size = count*dt_size_send;
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
-
- *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- if (retval != MPI_SUCCESS && request!=nullptr)
- *request = MPI_REQUEST_NULL;
- return retval;
-}
-
-int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (src == MPI_PROC_NULL) {
- simgrid::smpi::Status::empty(status);
- status->MPI_SOURCE = MPI_PROC_NULL;
- retval = MPI_SUCCESS;
- } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
- retval = MPI_ERR_RANK;
- } else if ((count < 0) || (buf==nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag<0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int src_traced = comm->group()->index(src);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_RECV;
- extra->src = src_traced;
- extra->dst = rank;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
-
- simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status);
- retval = MPI_SUCCESS;
-
- // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
- if (status != MPI_STATUS_IGNORE) {
- src_traced = comm->group()->index(status->MPI_SOURCE);
- if (not TRACE_smpi_view_internals()) {
- TRACE_smpi_recv(rank, src_traced, rank, tag);
- }
- }
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (dst == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (dst >= comm->group()->size() || dst <0){
- retval = MPI_ERR_RANK;
- } else if ((count < 0) || (buf == nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag < 0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int dst_traced = comm->group()->index(dst);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_SEND;
- extra->src = rank;
- extra->dst = dst_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0) {
- dt_size_send = datatype->size();
- }
- extra->send_size = count*dt_size_send;
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- if (not TRACE_smpi_view_internals()) {
- TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
- }
-
- simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (dst == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (dst >= comm->group()->size() || dst <0){
- retval = MPI_ERR_RANK;
- } else if ((count < 0) || (buf==nullptr && count > 0)) {
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if(tag<0 && tag != MPI_ANY_TAG){
- retval = MPI_ERR_TAG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int dst_traced = comm->group()->index(dst);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_SSEND;
- extra->src = rank;
- extra->dst = dst_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if(known == 0) {
- dt_size_send = datatype->size();
- }
- extra->send_size = count*dt_size_send;
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
-
- simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void *recvbuf,
- int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status * status)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not sendtype->is_valid() || not recvtype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) {
- simgrid::smpi::Status::empty(status);
- status->MPI_SOURCE = MPI_PROC_NULL;
- retval = MPI_SUCCESS;
- }else if (dst >= comm->group()->size() || dst <0 ||
- (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){
- retval = MPI_ERR_RANK;
- } else if ((sendcount < 0 || recvcount<0) ||
- (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) {
- retval = MPI_ERR_COUNT;
- } else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
- retval = MPI_ERR_TAG;
- } else {
-
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int dst_traced = comm->group()->index(dst);
- int src_traced = comm->group()->index(src);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_SENDRECV;
- extra->src = src_traced;
- extra->dst = dst_traced;
- int known=0;
- extra->datatype1 = encode_datatype(sendtype, &known);
- int dt_size_send = 1;
- if(known==0)
- dt_size_send = sendtype->size();
- extra->send_size = sendcount*dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if(known==0)
- dt_size_recv = recvtype->size();
- extra->recv_size = recvcount*dt_size_recv;
-
- TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
- TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size());
-
- simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm,
- status);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
- TRACE_smpi_recv(rank, src_traced, rank, recvtag);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, int sendtag, int src, int recvtag,
- MPI_Comm comm, MPI_Status* status)
-{
- int retval = 0;
- if (not datatype->is_valid()) {
- return MPI_ERR_TYPE;
- } else if (count < 0) {
- return MPI_ERR_COUNT;
- } else {
- int size = datatype->get_extent() * count;
- void* recvbuf = xbt_new0(char, size);
- retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status);
- if(retval==MPI_SUCCESS){
- simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype);
- }
- xbt_free(recvbuf);
-
- }
- return retval;
-}
-
-int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status)
-{
- int retval = 0;
- smpi_bench_end();
- if (request == nullptr || flag == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (*request == MPI_REQUEST_NULL) {
- *flag= true;
- simgrid::smpi::Status::empty(status);
- retval = MPI_SUCCESS;
- } else {
- int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
-
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_TEST;
- TRACE_smpi_testing_in(rank, extra);
-
- *flag = simgrid::smpi::Request::test(request,status);
-
- TRACE_smpi_testing_out(rank);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag, MPI_Status * status)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (index == nullptr || flag == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- *flag = simgrid::smpi::Request::testany(count, requests, index, status);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* statuses)
-{
- int retval = 0;
-
- smpi_bench_end();
- if (flag == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- *flag = simgrid::smpi::Request::testall(count, requests, statuses);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status* status) {
- int retval = 0;
- smpi_bench_end();
-
- if (status == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (source == MPI_PROC_NULL) {
- simgrid::smpi::Status::empty(status);
- status->MPI_SOURCE = MPI_PROC_NULL;
- retval = MPI_SUCCESS;
- } else {
- simgrid::smpi::Request::probe(source, tag, comm, status);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) {
- int retval = 0;
- smpi_bench_end();
-
- if (flag == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (source == MPI_PROC_NULL) {
- *flag=true;
- simgrid::smpi::Status::empty(status);
- status->MPI_SOURCE = MPI_PROC_NULL;
- retval = MPI_SUCCESS;
- } else {
- simgrid::smpi::Request::iprobe(source, tag, comm, flag, status);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Wait(MPI_Request * request, MPI_Status * status)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- simgrid::smpi::Status::empty(status);
-
- if (request == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (*request == MPI_REQUEST_NULL) {
- retval = MPI_SUCCESS;
- } else {
-
- int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
-
- int src_traced = (*request)->src();
- int dst_traced = (*request)->dst();
- int tag_traced= (*request)->tag();
- MPI_Comm comm = (*request)->comm();
- int is_wait_for_receive = ((*request)->flags() & RECV);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_WAIT;
- TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
-
- simgrid::smpi::Request::wait(request, status);
- retval = MPI_SUCCESS;
-
- //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
- TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
- if (is_wait_for_receive) {
- if(src_traced==MPI_ANY_SOURCE)
- src_traced = (status!=MPI_STATUS_IGNORE) ?
- comm->group()->rank(status->MPI_SOURCE) :
- src_traced;
- TRACE_smpi_recv(rank, src_traced, dst_traced, tag_traced);
- }
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * status)
-{
- if (index == nullptr)
- return MPI_ERR_ARG;
-
- smpi_bench_end();
- //save requests information for tracing
- typedef struct {
- int src;
- int dst;
- int recv;
- int tag;
- MPI_Comm comm;
- } savedvalstype;
- savedvalstype* savedvals=nullptr;
- if(count>0){
- savedvals = xbt_new0(savedvalstype, count);
- }
- for (int i = 0; i < count; i++) {
- MPI_Request req = requests[i]; //already received requests are no longer valid
- if (req) {
- savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), req->comm()};
- }
- }
- int rank_traced = smpi_process()->index();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_WAITANY;
- extra->send_size=count;
- TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
-
- *index = simgrid::smpi::Request::waitany(count, requests, status);
-
- if(*index!=MPI_UNDEFINED){
- int src_traced = savedvals[*index].src;
- //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
- int dst_traced = savedvals[*index].dst;
- int is_wait_for_receive = savedvals[*index].recv;
- if (is_wait_for_receive) {
- if(savedvals[*index].src==MPI_ANY_SOURCE)
- src_traced = (status != MPI_STATUSES_IGNORE)
- ? savedvals[*index].comm->group()->rank(status->MPI_SOURCE)
- : savedvals[*index].src;
- TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[*index].tag);
- }
- TRACE_smpi_ptp_out(rank_traced, src_traced, dst_traced, __FUNCTION__);
- }
- xbt_free(savedvals);
-
- smpi_bench_begin();
- return MPI_SUCCESS;
-}
-
-int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
-{
- smpi_bench_end();
- //save information from requests
- typedef struct {
- int src;
- int dst;
- int recv;
- int tag;
- int valid;
- MPI_Comm comm;
- } savedvalstype;
- savedvalstype* savedvals=xbt_new0(savedvalstype, count);
-
- for (int i = 0; i < count; i++) {
- MPI_Request req = requests[i];
- if(req!=MPI_REQUEST_NULL){
- savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), 1, req->comm()};
- }else{
- savedvals[i].valid=0;
- }
- }
- int rank_traced = smpi_process()->index();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
- extra->type = TRACING_WAITALL;
- extra->send_size=count;
- TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
-
- int retval = simgrid::smpi::Request::waitall(count, requests, status);
-
- for (int i = 0; i < count; i++) {
- if(savedvals[i].valid){
- //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
- int src_traced = savedvals[i].src;
- int dst_traced = savedvals[i].dst;
- int is_wait_for_receive = savedvals[i].recv;
- if (is_wait_for_receive) {
- if(src_traced==MPI_ANY_SOURCE)
- src_traced = (status!=MPI_STATUSES_IGNORE) ?
- savedvals[i].comm->group()->rank(status[i].MPI_SOURCE) : savedvals[i].src;
- TRACE_smpi_recv(rank_traced, src_traced, dst_traced,savedvals[i].tag);
- }
- }
- }
- TRACE_smpi_ptp_out(rank_traced, -1, -1, __FUNCTION__);
- xbt_free(savedvals);
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[])
-{
- int retval = 0;
-
- smpi_bench_end();
- if (outcount == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[])
-{
- int retval = 0;
-
- smpi_bench_end();
- if (outcount == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-
-int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_ARG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
-
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_BCAST;
- extra->root = root_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- if (comm->size() > 1)
- simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Barrier(MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_BARRIER;
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::barrier(comm);
-
- //Barrier can be used to synchronize RMA calls. Finish all requests from comm before.
- comm->finish_rma_calls();
-
- retval = MPI_SUCCESS;
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,
- int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
- ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
- retval = MPI_ERR_TYPE;
- } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){
- retval = MPI_ERR_COUNT;
- } else {
-
- char* sendtmpbuf = static_cast<char*>(sendbuf);
- int sendtmpcount = sendcount;
- MPI_Datatype sendtmptype = sendtype;
- if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) {
- sendtmpcount=0;
- sendtmptype=recvtype;
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_GATHER;
- extra->root = root_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtmptype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = sendtmptype->size();
- extra->send_size = sendtmpcount * dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if ((comm->rank() == root) && known == 0)
- dt_size_recv = recvtype->size();
- extra->recv_size = recvcount * dt_size_recv;
-
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm);
-
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
- MPI_Datatype recvtype, int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
- ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
- retval = MPI_ERR_TYPE;
- } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
- retval = MPI_ERR_COUNT;
- } else if (recvcounts == nullptr || displs == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- char* sendtmpbuf = static_cast<char*>(sendbuf);
- int sendtmpcount = sendcount;
- MPI_Datatype sendtmptype = sendtype;
- if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) {
- sendtmpcount=0;
- sendtmptype=recvtype;
- }
-
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
- int size = comm->size();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_GATHERV;
- extra->num_processes = size;
- extra->root = root_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtmptype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = sendtype->size();
- extra->send_size = sendtmpcount * dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if (known == 0)
- dt_size_recv = recvtype->size();
- if (comm->rank() == root) {
- extra->recvcounts = xbt_new(int, size);
- for (int i = 0; i < size; i++) // copy data to avoid bad free
- extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
- }
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
- retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
- (recvtype == MPI_DATATYPE_NULL)){
- retval = MPI_ERR_TYPE;
- } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) ||
- (recvcount <0)){
- retval = MPI_ERR_COUNT;
- } else {
- if(sendbuf == MPI_IN_PLACE) {
- sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*recvcount*comm->rank();
- sendcount=recvcount;
- sendtype=recvtype;
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_ALLGATHER;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = sendtype->size();
- extra->send_size = sendcount * dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if (known == 0)
- dt_size_recv = recvtype->size();
- extra->recv_size = recvcount * dt_size_recv;
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
- void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) {
- retval = MPI_ERR_TYPE;
- } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
- retval = MPI_ERR_COUNT;
- } else if (recvcounts == nullptr || displs == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
-
- if(sendbuf == MPI_IN_PLACE) {
- sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*displs[comm->rank()];
- sendcount=recvcounts[comm->rank()];
- sendtype=recvtype;
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int i = 0;
- int size = comm->size();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_ALLGATHERV;
- extra->num_processes = size;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = sendtype->size();
- extra->send_size = sendcount * dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if (known == 0)
- dt_size_recv = recvtype->size();
- extra->recvcounts = xbt_new(int, size);
- for (i = 0; i < size; i++) // copy data to avoid bad free
- extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (((comm->rank() == root) && (not sendtype->is_valid())) ||
- ((recvbuf != MPI_IN_PLACE) && (not recvtype->is_valid()))) {
- retval = MPI_ERR_TYPE;
- } else if ((sendbuf == recvbuf) ||
- ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){
- retval = MPI_ERR_BUFFER;
- }else {
-
- if (recvbuf == MPI_IN_PLACE) {
- recvtype = sendtype;
- recvcount = sendcount;
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_SCATTER;
- extra->root = root_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtype, &known);
- int dt_size_send = 1;
- if ((comm->rank() == root) && known == 0)
- dt_size_send = sendtype->size();
- extra->send_size = sendcount * dt_size_send;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if (known == 0)
- dt_size_recv = recvtype->size();
- extra->recv_size = recvcount * dt_size_recv;
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
- MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (sendcounts == nullptr || displs == nullptr) {
- retval = MPI_ERR_ARG;
- } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) ||
- ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) {
- retval = MPI_ERR_TYPE;
- } else {
- if (recvbuf == MPI_IN_PLACE) {
- recvtype = sendtype;
- recvcount = sendcounts[comm->rank()];
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
- int size = comm->size();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_SCATTERV;
- extra->num_processes = size;
- extra->root = root_traced;
- int known = 0;
- extra->datatype1 = encode_datatype(sendtype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = sendtype->size();
- if (comm->rank() == root) {
- extra->sendcounts = xbt_new(int, size);
- for (int i = 0; i < size; i++) // copy data to avoid bad free
- extra->sendcounts[i] = sendcounts[i] * dt_size_send;
- }
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = 1;
- if (known == 0)
- dt_size_recv = recvtype->size();
- extra->recv_size = recvcount * dt_size_recv;
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
- retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
-
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid() || op == MPI_OP_NULL) {
- retval = MPI_ERR_ARG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int root_traced = comm->group()->index(root);
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_REDUCE;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
- extra->root = root_traced;
-
- TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
-
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){
- int retval = 0;
-
- smpi_bench_end();
- if (not datatype->is_valid() || op == MPI_OP_NULL) {
- retval = MPI_ERR_ARG;
- } else {
- op->apply(inbuf, inoutbuf, &count, datatype);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else {
-
- char* sendtmpbuf = static_cast<char*>(sendbuf);
- if( sendbuf == MPI_IN_PLACE ) {
- sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
- simgrid::smpi::Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
- }
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_ALLREDUCE;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
-
- if( sendbuf == MPI_IN_PLACE )
- xbt_free(sendtmpbuf);
-
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_SCAN;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm){
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_EXSCAN;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = count * dt_size_send;
- void* sendtmpbuf = sendbuf;
- if (sendbuf == MPI_IN_PLACE) {
- sendtmpbuf = static_cast<void*>(xbt_malloc(count * datatype->size()));
- memcpy(sendtmpbuf, recvbuf, count * datatype->size());
- }
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- if (sendbuf == MPI_IN_PLACE)
- xbt_free(sendtmpbuf);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
- int retval = 0;
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else if (recvcounts == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int i = 0;
- int size = comm->size();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_REDUCE_SCATTER;
- extra->num_processes = size;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = 0;
- extra->recvcounts = xbt_new(int, size);
- int totalcount = 0;
- for (i = 0; i < size; i++) { // copy data to avoid bad free
- extra->recvcounts[i] = recvcounts[i] * dt_size_send;
- totalcount += recvcounts[i];
- }
- void* sendtmpbuf = sendbuf;
- if (sendbuf == MPI_IN_PLACE) {
- sendtmpbuf = static_cast<void*>(xbt_malloc(totalcount * datatype->size()));
- memcpy(sendtmpbuf, recvbuf, totalcount * datatype->size());
- }
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
- retval = MPI_SUCCESS;
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
- if (sendbuf == MPI_IN_PLACE)
- xbt_free(sendtmpbuf);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
- MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
- int retval;
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else if (recvcount < 0) {
- retval = MPI_ERR_ARG;
- } else {
- int count = comm->size();
-
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_REDUCE_SCATTER;
- extra->num_processes = count;
- int known = 0;
- extra->datatype1 = encode_datatype(datatype, &known);
- int dt_size_send = 1;
- if (known == 0)
- dt_size_send = datatype->size();
- extra->send_size = 0;
- extra->recvcounts = xbt_new(int, count);
- for (int i = 0; i < count; i++) // copy data to avoid bad free
- extra->recvcounts[i] = recvcount * dt_size_send;
- void* sendtmpbuf = sendbuf;
- if (sendbuf == MPI_IN_PLACE) {
- sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * count * datatype->size()));
- memcpy(sendtmpbuf, recvbuf, recvcount * count * datatype->size());
- }
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- int* recvcounts = static_cast<int*>(xbt_malloc(count * sizeof(int)));
- for (int i = 0; i < count; i++)
- recvcounts[i] = recvcount;
- simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
- xbt_free(recvcounts);
- retval = MPI_SUCCESS;
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
- if (sendbuf == MPI_IN_PLACE)
- xbt_free(sendtmpbuf);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, MPI_Comm comm)
-{
- int retval = 0;
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
- retval = MPI_ERR_TYPE;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_ALLTOALL;
-
- void* sendtmpbuf = static_cast<char*>(sendbuf);
- int sendtmpcount = sendcount;
- MPI_Datatype sendtmptype = sendtype;
- if (sendbuf == MPI_IN_PLACE) {
- sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * comm->size() * recvtype->size()));
- memcpy(sendtmpbuf, recvbuf, recvcount * comm->size() * recvtype->size());
- sendtmpcount = recvcount;
- sendtmptype = recvtype;
- }
-
- int known = 0;
- extra->datatype1 = encode_datatype(sendtmptype, &known);
- if (known == 0)
- extra->send_size = sendtmpcount * sendtmptype->size();
- else
- extra->send_size = sendtmpcount;
- extra->datatype2 = encode_datatype(recvtype, &known);
- if (known == 0)
- extra->recv_size = recvcount * recvtype->size();
- else
- extra->recv_size = recvcount;
-
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
- retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
- if (sendbuf == MPI_IN_PLACE)
- xbt_free(sendtmpbuf);
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype sendtype, void* recvbuf,
- int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
-{
- int retval = 0;
-
- smpi_bench_end();
-
- if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
- } else if (sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
- retval = MPI_ERR_TYPE;
- } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
- recvdisps == nullptr) {
- retval = MPI_ERR_ARG;
- } else {
- int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
- int i = 0;
- int size = comm->size();
- instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
- extra->type = TRACING_ALLTOALLV;
- extra->send_size = 0;
- extra->recv_size = 0;
- extra->recvcounts = xbt_new(int, size);
- extra->sendcounts = xbt_new(int, size);
- int known = 0;
- extra->datatype2 = encode_datatype(recvtype, &known);
- int dt_size_recv = recvtype->size();
-
- void* sendtmpbuf = static_cast<char*>(sendbuf);
- int* sendtmpcounts = sendcounts;
- int* sendtmpdisps = senddisps;
- MPI_Datatype sendtmptype = sendtype;
- int maxsize = 0;
- for (i = 0; i < size; i++) { // copy data to avoid bad free
- extra->recv_size += recvcounts[i] * dt_size_recv;
- extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
- if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize)
- maxsize = (recvdisps[i] + recvcounts[i]) * dt_size_recv;
- }
-
- if (sendbuf == MPI_IN_PLACE) {
- sendtmpbuf = static_cast<void*>(xbt_malloc(maxsize));
- memcpy(sendtmpbuf, recvbuf, maxsize);
- sendtmpcounts = static_cast<int*>(xbt_malloc(size * sizeof(int)));
- memcpy(sendtmpcounts, recvcounts, size * sizeof(int));
- sendtmpdisps = static_cast<int*>(xbt_malloc(size * sizeof(int)));
- memcpy(sendtmpdisps, recvdisps, size * sizeof(int));
- sendtmptype = recvtype;
- }
-
- extra->datatype1 = encode_datatype(sendtmptype, &known);
- int dt_size_send = sendtmptype->size();
-
- for (i = 0; i < size; i++) { // copy data to avoid bad free
- extra->send_size += sendtmpcounts[i] * dt_size_send;
- extra->sendcounts[i] = sendtmpcounts[i] * dt_size_send;
- }
- extra->num_processes = size;
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
- recvdisps, recvtype, comm);
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
- if (sendbuf == MPI_IN_PLACE) {
- xbt_free(sendtmpbuf);
- xbt_free(sendtmpcounts);
- xbt_free(sendtmpdisps);
- }
- }
-
- smpi_bench_begin();
- return retval;
-}
-
-
-int PMPI_Get_processor_name(char *name, int *resultlen)
-{
- strncpy(name, sg_host_self()->getCname(), strlen(sg_host_self()->getCname()) < MPI_MAX_PROCESSOR_NAME - 1
- ? strlen(sg_host_self()->getCname()) + 1
- : MPI_MAX_PROCESSOR_NAME - 1);
- *resultlen = strlen(name) > MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
-
- return MPI_SUCCESS;
-}
-
-int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
-{
- if (status == nullptr || count == nullptr) {
- return MPI_ERR_ARG;
- } else if (not datatype->is_valid()) {
- return MPI_ERR_TYPE;
- } else {
- size_t size = datatype->size();
- if (size == 0) {
- *count = 0;
- return MPI_SUCCESS;
- } else if (status->count % size != 0) {
- return MPI_UNDEFINED;
- } else {
- *count = simgrid::smpi::Status::get_count(status, datatype);
- return MPI_SUCCESS;
- }
- }
-}
-
-int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type);
- }
-}
-
-int PMPI_Type_commit(MPI_Datatype* datatype) {
- if (datatype == nullptr || *datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else {
- (*datatype)->commit();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0 || blocklen<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type);
- }
-}
-
-int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0 || blocklen<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
- }
-}
-
-int PMPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
- return MPI_Type_hvector(count, blocklen, stride, old_type, new_type);
-}
-
-int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
- }
-}
-
-int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
- }
-}
-
-int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI_Datatype old_type,
- MPI_Datatype* new_type)
-{
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- int* blocklens=static_cast<int*>(xbt_malloc(blocklength*count*sizeof(int)));
- for (int i = 0; i < count; i++)
- blocklens[i]=blocklength;
- int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
- xbt_free(blocklens);
- return retval;
- }
-}
-
-int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
-{
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
- }
-}
-
-int PMPI_Type_create_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type,
- MPI_Datatype* new_type) {
- return PMPI_Type_hindexed(count, blocklens,indices,old_type,new_type);
-}
-
-int PMPI_Type_create_hindexed_block(int count, int blocklength, MPI_Aint* indices, MPI_Datatype old_type,
- MPI_Datatype* new_type) {
- if (old_type == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (count<0){
- return MPI_ERR_COUNT;
- } else {
- int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int));
- for (int i = 0; i < count; i++)
- blocklens[i] = blocklength;
- int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
- xbt_free(blocklens);
- return retval;
- }
-}
-
-int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type) {
- if (count<0){
- return MPI_ERR_COUNT;
- } else {
- return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type);
- }
-}
-
-int PMPI_Type_create_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types,
- MPI_Datatype* new_type) {
- return PMPI_Type_struct(count, blocklens, indices, old_types, new_type);
-}
-
-int PMPI_Error_class(int errorcode, int* errorclass) {
- // assume smpi uses only standard mpi error codes
- *errorclass=errorcode;
- return MPI_SUCCESS;
-}
-
-int PMPI_Initialized(int* flag) {
- *flag=(smpi_process()!=nullptr && smpi_process()->initialized());
- return MPI_SUCCESS;
-}
-
-/* The topo part of MPI_COMM_WORLD should always be nullptr. When other topologies will be implemented, not only should we
- * check if the topology is nullptr, but we should check if it is the good topology type (so we have to add a
- * MPIR_Topo_Type field, and replace the MPI_Topology field by an union)*/
-
-int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
- if (comm_old == MPI_COMM_NULL){
- return MPI_ERR_COMM;
- } else if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr) {
- return MPI_ERR_ARG;
- } else{
- simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
- if(*comm_cart==MPI_COMM_NULL)
- delete topo;
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
- if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if (coords == nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- return topo->rank(coords, rank);
-}
-
-int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
- if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if (source == nullptr || dest == nullptr || direction < 0 ) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- return topo->shift(direction, displ, source, dest);
-}
-
-int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
- if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if (rank < 0 || rank >= comm->size()) {
- return MPI_ERR_RANK;
- }
- if (maxdims <= 0) {
- return MPI_ERR_ARG;
- }
- if(coords == nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- return topo->coords(rank, maxdims, coords);
-}
-
-int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
- if(comm == nullptr || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if(maxdims <= 0 || dims == nullptr || periods == nullptr || coords == nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- return topo->get(maxdims, dims, periods, coords);
-}
-
-int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
- if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if (ndims == nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- return topo->dim_get(ndims);
-}
-
-int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
- if(dims == nullptr) {
- return MPI_ERR_ARG;
- }
- if (ndims < 1 || nnodes < 1) {
- return MPI_ERR_DIMS;
- }
- return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims);
-}
-
-int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
- if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
- return MPI_ERR_TOPOLOGY;
- }
- if (comm_new == nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
- if (topo==nullptr) {
- return MPI_ERR_ARG;
- }
- MPIR_Cart_Topology cart = topo->sub(remain_dims, comm_new);
- if(*comm_new==MPI_COMM_NULL)
- delete cart;
- if(cart==nullptr)
- return MPI_ERR_ARG;
- return MPI_SUCCESS;
-}
-
-int PMPI_Type_create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
- if (oldtype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- }
- int blocks[3] = {1, 1, 1};
- MPI_Aint disps[3] = {lb, 0, lb + extent};
- MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
-
- *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
-
- (*newtype)->addflag(~DT_FLAG_COMMITED);
- return MPI_SUCCESS;
-}
-
-int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
- int retval = 0;
- smpi_bench_end();
- if (comm == MPI_COMM_NULL) {
- retval= MPI_ERR_COMM;
- }else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){
- retval= MPI_ERR_OTHER;
- }else{
- *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
- int retval = 0;
- smpi_bench_end();
- if (comm == MPI_COMM_NULL) {
- retval= MPI_ERR_COMM;
- }else if (disp_unit <= 0 || size < 0 ){
- retval= MPI_ERR_OTHER;
- }else{
- void* ptr = xbt_malloc(size);
- if(ptr==nullptr)
- return MPI_ERR_NO_MEM;
- *static_cast<void**>(base) = ptr;
- *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm,1);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
- int retval = 0;
- smpi_bench_end();
- if (comm == MPI_COMM_NULL) {
- retval= MPI_ERR_COMM;
- }else{
- *win = new simgrid::smpi::Win(info, comm);
- retval = MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
- int retval = 0;
- smpi_bench_end();
- if(win == MPI_WIN_NULL){
- retval = MPI_ERR_WIN;
- } else if ((base == nullptr && size != 0) || size < 0 ){
- retval= MPI_ERR_OTHER;
- }else{
- retval = win->attach(base, size);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_detach(MPI_Win win, void *base){
- int retval = 0;
- smpi_bench_end();
- if(win == MPI_WIN_NULL){
- retval = MPI_ERR_WIN;
- } else if (base == nullptr){
- retval= MPI_ERR_OTHER;
- }else{
- retval = win->detach(base);
- }
- smpi_bench_begin();
- return retval;
-}
-
-
-int PMPI_Win_free( MPI_Win* win){
- int retval = 0;
- smpi_bench_end();
- if (win == nullptr || *win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- }else{
- delete *win;
- retval=MPI_SUCCESS;
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_set_name(MPI_Win win, char * name)
-{
- if (win == MPI_WIN_NULL) {
- return MPI_ERR_TYPE;
- } else if (name == nullptr) {
- return MPI_ERR_ARG;
- } else {
- win->set_name(name);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Win_get_name(MPI_Win win, char * name, int* len)
-{
- if (win == MPI_WIN_NULL) {
- return MPI_ERR_WIN;
- } else if (name == nullptr) {
- return MPI_ERR_ARG;
- } else {
- win->get_name(name, len);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Win_get_info(MPI_Win win, MPI_Info* info)
-{
- if (win == MPI_WIN_NULL) {
- return MPI_ERR_WIN;
- } else {
- *info = win->info();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Win_set_info(MPI_Win win, MPI_Info info)
-{
- if (win == MPI_WIN_NULL) {
- return MPI_ERR_TYPE;
- } else {
- win->set_info(info);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){
- if (win == MPI_WIN_NULL) {
- return MPI_ERR_WIN;
- }else {
- win->get_group(group);
- (*group)->ref();
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Win_fence( int assert, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int rank = smpi_process()->index();
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
- retval = win->fence(assert);
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if(request == nullptr){
- retval = MPI_ERR_REQUEST;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype, request);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int dst_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
- TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
-
- retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype);
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if(request == nullptr){
- retval = MPI_ERR_REQUEST;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int dst_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
- TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
-
- retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype, request);
-
- TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype, op);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0) ||
- (origin_addr==nullptr && origin_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else if(request == nullptr){
- retval = MPI_ERR_REQUEST;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
- target_datatype, op, request);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
- (result_addr==nullptr && result_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
- (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
- result_count, result_datatype, target_rank, target_disp,
- target_count, target_datatype, op);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-
-int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- *request = MPI_REQUEST_NULL;
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
- (result_addr==nullptr && result_count > 0)){
- retval = MPI_ERR_COUNT;
- } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
- (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
- retval = MPI_ERR_TYPE;
- } else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
- } else if(request == nullptr){
- retval = MPI_ERR_REQUEST;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
- result_count, result_datatype, target_rank, target_disp,
- target_count, target_datatype, op, request);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
- return PMPI_Get_accumulate(origin_addr, origin_addr==nullptr?0:1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win);
-}
-
-int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
- void *result_addr, MPI_Datatype datatype, int target_rank,
- MPI_Aint target_disp, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (target_rank == MPI_PROC_NULL) {
- retval = MPI_SUCCESS;
- } else if (target_rank <0){
- retval = MPI_ERR_RANK;
- } else if (win->dynamic()==0 && target_disp <0){
- //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
- retval = MPI_ERR_ARG;
- } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){
- retval = MPI_ERR_COUNT;
- } else if (not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
- } else {
- int rank = smpi_process()->index();
- MPI_Group group;
- win->get_group(&group);
- int src_traced = group->index(target_rank);
- TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
- retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype,
- target_rank, target_disp);
-
- TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (group==MPI_GROUP_NULL){
- retval = MPI_ERR_GROUP;
- } else {
- int rank = smpi_process()->index();
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
- retval = win->post(group,assert);
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (group==MPI_GROUP_NULL){
- retval = MPI_ERR_GROUP;
- } else {
- int rank = smpi_process()->index();
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
- retval = win->start(group,assert);
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_complete(MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int rank = smpi_process()->index();
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-
- retval = win->complete();
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_wait(MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int rank = smpi_process()->index();
- TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-
- retval = win->wait();
-
- TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (lock_type != MPI_LOCK_EXCLUSIVE &&
- lock_type != MPI_LOCK_SHARED) {
- retval = MPI_ERR_LOCKTYPE;
- } else if (rank == MPI_PROC_NULL){
- retval = MPI_SUCCESS;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->lock(lock_type,rank,assert);
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_unlock(int rank, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (rank == MPI_PROC_NULL){
- retval = MPI_SUCCESS;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->unlock(rank);
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_lock_all(int assert, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->lock_all(assert);
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_unlock_all(MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->unlock_all();
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_flush(int rank, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (rank == MPI_PROC_NULL){
- retval = MPI_SUCCESS;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->flush(rank);
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_flush_local(int rank, MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else if (rank == MPI_PROC_NULL){
- retval = MPI_SUCCESS;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->flush_local(rank);
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_flush_all(MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->flush_all();
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Win_flush_local_all(MPI_Win win){
- int retval = 0;
- smpi_bench_end();
- if (win == MPI_WIN_NULL) {
- retval = MPI_ERR_WIN;
- } else {
- int myrank = smpi_process()->index();
- TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
- retval = win->flush_local_all();
- TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
- }
- smpi_bench_begin();
- return retval;
-}
-
-int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
- void *ptr = xbt_malloc(size);
- if(ptr==nullptr)
- return MPI_ERR_NO_MEM;
- else {
- *static_cast<void**>(baseptr) = ptr;
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Free_mem(void *baseptr){
- xbt_free(baseptr);
- return MPI_SUCCESS;
-}
-
-int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (name == nullptr) {
- return MPI_ERR_ARG;
- } else {
- datatype->set_name(name);
- return MPI_SUCCESS;
- }
-}
-
-int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
-{
- if (datatype == MPI_DATATYPE_NULL) {
- return MPI_ERR_TYPE;
- } else if (name == nullptr) {
- return MPI_ERR_ARG;
- } else {
- datatype->get_name(name, len);
- return MPI_SUCCESS;
- }
-}
-
-MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){
- return static_cast<MPI_Datatype>(simgrid::smpi::F2C::f2c(datatype));
-}
-
-MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){
- return datatype->c2f();
-}
-
-MPI_Group PMPI_Group_f2c(MPI_Fint group){
- return simgrid::smpi::Group::f2c(group);
-}
-
-MPI_Fint PMPI_Group_c2f(MPI_Group group){
- return group->c2f();
-}
-
-MPI_Request PMPI_Request_f2c(MPI_Fint request){
- return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
-}
-
-MPI_Fint PMPI_Request_c2f(MPI_Request request) {
- return request->c2f();
-}
-
-MPI_Win PMPI_Win_f2c(MPI_Fint win){
- return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
-}
-
-MPI_Fint PMPI_Win_c2f(MPI_Win win){
- return win->c2f();
-}
-
-MPI_Op PMPI_Op_f2c(MPI_Fint op){
- return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
-}
-
-MPI_Fint PMPI_Op_c2f(MPI_Op op){
- return op->c2f();
-}
-
-MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
- return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
-}
-
-MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
- return comm->c2f();
-}
-
-MPI_Info PMPI_Info_f2c(MPI_Fint info){
- return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
-}
-
-MPI_Fint PMPI_Info_c2f(MPI_Info info){
- return info->c2f();
-}
-
-int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
- smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
- smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
- return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
+int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
+ smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
+ smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
+ return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
}
int PMPI_Keyval_free(int* keyval) {
return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
}
-int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
- if(keyval == MPI_TAG_UB||keyval == MPI_HOST||keyval == MPI_IO ||keyval == MPI_WTIME_IS_GLOBAL||keyval == MPI_APPNUM
- ||keyval == MPI_UNIVERSE_SIZE||keyval == MPI_LASTUSEDCODE)
- return MPI_ERR_ARG;
- else if (comm==MPI_COMM_NULL)
- return MPI_ERR_COMM;
- else
- return comm->attr_delete<simgrid::smpi::Comm>(keyval);
-}
-
-int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
- static int one = 1;
- static int zero = 0;
- static int tag_ub = INT_MAX;
- static int last_used_code = MPI_ERR_LASTCODE;
-
- if (comm==MPI_COMM_NULL){
- *flag = 0;
- return MPI_ERR_COMM;
- }
-
- switch (keyval) {
- case MPI_HOST:
- case MPI_IO:
- case MPI_APPNUM:
- *flag = 1;
- *static_cast<int**>(attr_value) = &zero;
- return MPI_SUCCESS;
- case MPI_UNIVERSE_SIZE:
- *flag = 1;
- *static_cast<int**>(attr_value) = &smpi_universe_size;
- return MPI_SUCCESS;
- case MPI_LASTUSEDCODE:
- *flag = 1;
- *static_cast<int**>(attr_value) = &last_used_code;
- return MPI_SUCCESS;
- case MPI_TAG_UB:
- *flag=1;
- *static_cast<int**>(attr_value) = &tag_ub;
- return MPI_SUCCESS;
- case MPI_WTIME_IS_GLOBAL:
- *flag = 1;
- *static_cast<int**>(attr_value) = &one;
- return MPI_SUCCESS;
- default:
- return comm->attr_get<simgrid::smpi::Comm>(keyval, attr_value, flag);
- }
-}
-
-int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
- if(keyval == MPI_TAG_UB||keyval == MPI_HOST||keyval == MPI_IO ||keyval == MPI_WTIME_IS_GLOBAL||keyval == MPI_APPNUM
- ||keyval == MPI_UNIVERSE_SIZE||keyval == MPI_LASTUSEDCODE)
- return MPI_ERR_ARG;
- else if (comm==MPI_COMM_NULL)
- return MPI_ERR_COMM;
- else
- return comm->attr_put<simgrid::smpi::Comm>(keyval, attr_value);
-}
-
-int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
-{
- return PMPI_Attr_get(comm, comm_keyval, attribute_val,flag);
-}
-
-int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
-{
- return PMPI_Attr_put(comm, comm_keyval, attribute_val);
-}
-
-int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
-{
- return PMPI_Attr_delete(comm, comm_keyval);
-}
-
-int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
- void* extra_state)
-{
- return PMPI_Keyval_create(copy_fn, delete_fn, keyval, extra_state);
-}
-
-int PMPI_Comm_free_keyval(int* keyval) {
- return PMPI_Keyval_free(keyval);
-}
-
-int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
-{
- if (type==MPI_DATATYPE_NULL)
- return MPI_ERR_TYPE;
- else
- return type->attr_get<simgrid::smpi::Datatype>(type_keyval, attribute_val, flag);
-}
-
-int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
-{
- if (type==MPI_DATATYPE_NULL)
- return MPI_ERR_TYPE;
- else
- return type->attr_put<simgrid::smpi::Datatype>(type_keyval, attribute_val);
-}
-
-int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
-{
- if (type==MPI_DATATYPE_NULL)
- return MPI_ERR_TYPE;
- else
- return type->attr_delete<simgrid::smpi::Datatype>(type_keyval);
-}
-
-int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval,
- void* extra_state)
-{
- smpi_copy_fn _copy_fn={nullptr,copy_fn,nullptr};
- smpi_delete_fn _delete_fn={nullptr,delete_fn,nullptr};
- return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Datatype>(_copy_fn, _delete_fn, keyval, extra_state);
-}
-
-int PMPI_Type_free_keyval(int* keyval) {
- return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Datatype>(keyval);
-}
-
-int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag)
-{
- static MPI_Aint size;
- static int disp_unit;
- if (win==MPI_WIN_NULL)
- return MPI_ERR_TYPE;
- else{
- switch (keyval) {
- case MPI_WIN_BASE :
- *static_cast<void**>(attribute_val) = win->base();
- *flag = 1;
- return MPI_SUCCESS;
- case MPI_WIN_SIZE :
- size = win->size();
- *static_cast<MPI_Aint**>(attribute_val) = &size;
- *flag = 1;
- return MPI_SUCCESS;
- case MPI_WIN_DISP_UNIT :
- disp_unit=win->disp_unit();
- *static_cast<int**>(attribute_val) = &disp_unit;
- *flag = 1;
- return MPI_SUCCESS;
- default:
- return win->attr_get<simgrid::smpi::Win>(keyval, attribute_val, flag);
- }
-}
-
-}
-
-int PMPI_Win_set_attr (MPI_Win win, int type_keyval, void *attribute_val)
-{
- if (win==MPI_WIN_NULL)
- return MPI_ERR_TYPE;
- else
- return win->attr_put<simgrid::smpi::Win>(type_keyval, attribute_val);
-}
-
-int PMPI_Win_delete_attr (MPI_Win win, int type_keyval)
-{
- if (win==MPI_WIN_NULL)
- return MPI_ERR_TYPE;
- else
- return win->attr_delete<simgrid::smpi::Win>(type_keyval);
-}
-
-int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval,
- void* extra_state)
-{
- smpi_copy_fn _copy_fn={nullptr, nullptr, copy_fn};
- smpi_delete_fn _delete_fn={nullptr, nullptr, delete_fn};
- return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Win>(_copy_fn, _delete_fn, keyval, extra_state);
-}
-
-int PMPI_Win_free_keyval(int* keyval) {
- return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Win>(keyval);
-}
-
-int PMPI_Info_create( MPI_Info *info){
- if (info == nullptr)
- return MPI_ERR_ARG;
- *info = new simgrid::smpi::Info();
- return MPI_SUCCESS;
-}
-
-int PMPI_Info_set( MPI_Info info, char *key, char *value){
- if (info == nullptr || key == nullptr || value == nullptr)
- return MPI_ERR_ARG;
- info->set(key, value);
- return MPI_SUCCESS;
-}
-
-int PMPI_Info_free( MPI_Info *info){
- if (info == nullptr || *info==nullptr)
- return MPI_ERR_ARG;
- simgrid::smpi::Info::unref(*info);
- *info=MPI_INFO_NULL;
- return MPI_SUCCESS;
-}
-
-int PMPI_Info_get(MPI_Info info,char *key,int valuelen, char *value, int *flag){
- *flag=false;
- if (info == nullptr || key == nullptr || valuelen <0)
- return MPI_ERR_ARG;
- if (value == nullptr)
- return MPI_ERR_INFO_VALUE;
- return info->get(key, valuelen, value, flag);
-}
-
-int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
- if (info == nullptr || newinfo==nullptr)
- return MPI_ERR_ARG;
- *newinfo = new simgrid::smpi::Info(info);
- return MPI_SUCCESS;
-}
-
-int PMPI_Info_delete(MPI_Info info, char *key){
- if (info == nullptr || key==nullptr)
- return MPI_ERR_ARG;
- return info->remove(key);
-}
-
-int PMPI_Info_get_nkeys( MPI_Info info, int *nkeys){
- if (info == nullptr || nkeys==nullptr)
- return MPI_ERR_ARG;
- return info->get_nkeys(nkeys);
-}
-
-int PMPI_Info_get_nthkey( MPI_Info info, int n, char *key){
- if (info == nullptr || key==nullptr || n<0 || n> MPI_MAX_INFO_KEY)
- return MPI_ERR_ARG;
- return info->get_nthkey(n, key);
-}
-
-int PMPI_Info_get_valuelen( MPI_Info info, char *key, int *valuelen, int *flag){
- *flag=false;
- if (info == nullptr || key == nullptr || valuelen==nullptr)
- return MPI_ERR_ARG;
- return info->get_valuelen(key, valuelen, flag);
-}
-
-int PMPI_Unpack(void* inbuf, int incount, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
- if(incount<0 || outcount < 0 || inbuf==nullptr || outbuf==nullptr)
- return MPI_ERR_ARG;
- if (not type->is_valid())
- return MPI_ERR_TYPE;
- if(comm==MPI_COMM_NULL)
- return MPI_ERR_COMM;
- return type->unpack(inbuf, incount, position, outbuf,outcount, comm);
-}
-
-int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
- if(incount<0 || outcount < 0|| inbuf==nullptr || outbuf==nullptr)
- return MPI_ERR_ARG;
- if (not type->is_valid())
- return MPI_ERR_TYPE;
- if(comm==MPI_COMM_NULL)
- return MPI_ERR_COMM;
- return type->pack(inbuf, incount, outbuf,outcount,position, comm);
-}
-
-int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
- if(incount<0)
- return MPI_ERR_ARG;
- if (not datatype->is_valid())
- return MPI_ERR_TYPE;
- if(comm==MPI_COMM_NULL)
- return MPI_ERR_COMM;
-
- *size=incount*datatype->size();
-
- return MPI_SUCCESS;
-}
-
} // extern "C"
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+// MPI_Bcast user-level entry: broadcast `count` items of `datatype` from `root`
+// to every rank of `comm`, wrapped in SMPI benchmarking and tracing hooks.
+int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end(); // leave user-code benchmarking mode while simulating the call
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; // comm is non-null here; ternary is defensive
+    int root_traced = comm->group()->index(root);
+
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); // ownership passes to the tracing layer
+    extra->type = TRACING_BCAST;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0) // datatype not a predefined one: trace the size in bytes
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+    if (comm->size() > 1)
+      simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm);
+    retval = MPI_SUCCESS; // NOTE(review): return code of Colls::bcast is dropped — confirm this is intended
+
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Barrier user-level entry: block until every rank of `comm` has entered.
+int PMPI_Barrier(MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; // comm is non-null here
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_BARRIER;
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::barrier(comm);
+
+    //Barrier can be used to synchronize RMA calls. Finish all requests from comm before.
+    comm->finish_rma_calls();
+
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Gather user-level entry: gather fixed-size contributions from all ranks of
+// `comm` into `recvbuf` at `root`. Supports MPI_IN_PLACE at the root.
+int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,
+                int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
+             ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
+    retval = MPI_ERR_TYPE;
+  } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){
+    retval = MPI_ERR_COUNT;
+  } else {
+
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    // MPI_IN_PLACE at the root: nothing to send, the root's slot is already filled.
+    if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) {
+      sendtmpcount=0;
+      sendtmptype=recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_GATHER;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtmptype->size();
+    extra->send_size = sendtmpcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if ((comm->rank() == root) && known == 0) // recvtype only meaningful at the root
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Gatherv user-level entry: gather variable-size contributions (per-rank
+// `recvcounts`/`displs`) from all ranks of `comm` at `root`. Supports
+// MPI_IN_PLACE at the root.
+int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
+                 MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
+             ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
+    retval = MPI_ERR_TYPE;
+  } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
+    retval = MPI_ERR_COUNT;
+  } else if (recvcounts == nullptr || displs == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    // MPI_IN_PLACE at the root: nothing to send, the root's slot is already filled.
+    if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) {
+      sendtmpcount=0;
+      sendtmptype=recvtype;
+    }
+
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_GATHERV;
+    extra->num_processes = size;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtmptype->size(); // was sendtype->size(): NULL at root with MPI_IN_PLACE (matches PMPI_Gather)
+    extra->send_size = sendtmpcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    if (comm->rank() == root) {
+      extra->recvcounts = xbt_new(int, size);
+      for (int i = 0; i < size; i++) // copy data to avoid bad free
+        extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+    }
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Allgather user-level entry: every rank gathers the same fixed-size block
+// from every other rank. MPI_IN_PLACE makes each rank reuse its recvbuf slice.
+int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                   void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
+             (recvtype == MPI_DATATYPE_NULL)){
+    retval = MPI_ERR_TYPE;
+  } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) ||
+             (recvcount <0)){
+    retval = MPI_ERR_COUNT;
+  } else {
+    // MPI_IN_PLACE: send this rank's own slice of recvbuf (extent-based offset).
+    if(sendbuf == MPI_IN_PLACE) {
+      sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*recvcount*comm->rank();
+      sendcount=recvcount;
+      sendtype=recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLGATHER;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    extra->send_size = sendcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Allgatherv user-level entry: all-gather with per-rank counts/displacements.
+// MPI_IN_PLACE makes each rank contribute its own recvbuf region.
+int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                    void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) {
+    retval = MPI_ERR_TYPE;
+  } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
+    retval = MPI_ERR_COUNT;
+  } else if (recvcounts == nullptr || displs == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+
+    // MPI_IN_PLACE: this rank's contribution is already at its displacement in recvbuf.
+    if(sendbuf == MPI_IN_PLACE) {
+      sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*displs[comm->rank()];
+      sendcount=recvcounts[comm->rank()];
+      sendtype=recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int i = 0;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLGATHERV;
+    extra->num_processes = size;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    extra->send_size = sendcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recvcounts = xbt_new(int, size);
+    for (i = 0; i < size; i++) // copy data to avoid bad free
+      extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Scatter user-level entry: distribute fixed-size blocks of `sendbuf` from
+// `root` to every rank of `comm`. Supports MPI_IN_PLACE at non-root recv side.
+int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                 void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((comm->rank() == root) && (not sendtype->is_valid())) ||
+             ((recvbuf != MPI_IN_PLACE) && (not recvtype->is_valid()))) {
+    retval = MPI_ERR_TYPE;
+  } else if ((sendbuf == recvbuf) ||
+             ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){
+    retval = MPI_ERR_BUFFER;
+  }else {
+
+    // MPI_IN_PLACE on the recv side: the root keeps its own block in sendbuf.
+    if (recvbuf == MPI_IN_PLACE) {
+      recvtype = sendtype;
+      recvcount = sendcount;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_SCATTER;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if ((comm->rank() == root) && known == 0) // sendtype only meaningful at the root
+      dt_size_send = sendtype->size();
+    extra->send_size = sendcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Scatterv user-level entry: scatter variable-size blocks (per-rank
+// `sendcounts`/`displs`) from `root`. Supports MPI_IN_PLACE on the recv side.
+int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
+                  MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (sendcounts == nullptr || displs == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) ||
+             ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    // MPI_IN_PLACE on the recv side: the root keeps its own block in place.
+    if (recvbuf == MPI_IN_PLACE) {
+      recvtype = sendtype;
+      recvcount = sendcounts[comm->rank()];
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_SCATTERV;
+    extra->num_processes = size;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    if (comm->rank() == root) {
+      extra->sendcounts = xbt_new(int, size);
+      for (int i = 0; i < size; i++) // copy data to avoid bad free
+        extra->sendcounts[i] = sendcounts[i] * dt_size_send;
+    }
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
+
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Reduce user-level entry: combine `count` elements from all ranks with `op`,
+// leaving the result at `root`.
+int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid() || op == MPI_OP_NULL) {
+    retval = MPI_ERR_ARG; // NOTE(review): MPI_OP_NULL maps to MPI_ERR_ARG here but MPI_ERR_OP in Allreduce — confirm
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    extra->root = root_traced;
+
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Reduce_local user-level entry: apply `op` element-wise to inbuf and
+// inoutbuf, storing the result in inoutbuf. Purely local, no communication.
+int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){
+  int retval = 0;
+
+  smpi_bench_end();
+  if (not datatype->is_valid() || op == MPI_OP_NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+    op->apply(inbuf, inoutbuf, &count, datatype);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Allreduce user-level entry: reduce with `op` and distribute the result to
+// all ranks. MPI_IN_PLACE copies recvbuf into a temporary send buffer first.
+int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    if( sendbuf == MPI_IN_PLACE ) {
+      // Snapshot the current recvbuf contents so reduction input and output don't alias.
+      sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
+      simgrid::smpi::Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLREDUCE;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
+
+    if( sendbuf == MPI_IN_PLACE )
+      xbt_free(sendtmpbuf);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Scan user-level entry: inclusive prefix reduction over the ranks of `comm`.
+int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_SCAN;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Exscan user-level entry: exclusive prefix reduction over the ranks of
+// `comm` (rank i receives the reduction of ranks 0..i-1).
+int PMPI_Exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm){
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_EXSCAN;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      // NOTE(review): buffer sized/copied with size(), not extent — may be short for derived datatypes; confirm
+      sendtmpbuf = static_cast<void*>(xbt_malloc(count * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, count * datatype->size());
+    }
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Reduce_scatter user-level entry: element-wise reduction followed by a
+// scatter of the result, rank i receiving recvcounts[i] elements.
+int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else if (recvcounts == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int i = 0;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE_SCATTER;
+    extra->num_processes = size;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = 0;
+    extra->recvcounts = xbt_new(int, size);
+    int totalcount = 0;
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->recvcounts[i] = recvcounts[i] * dt_size_send;
+      totalcount += recvcounts[i];
+    }
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      // Snapshot recvbuf: the full input vector lives there when IN_PLACE is used.
+      sendtmpbuf = static_cast<void*>(xbt_malloc(totalcount * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, totalcount * datatype->size());
+    }
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Reduce_scatter_block user-level entry: reduce_scatter with an identical
+// `recvcount` per rank; implemented by expanding to a recvcounts array.
+int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
+                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else if (recvcount < 0) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int count = comm->size();
+
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE_SCATTER; // no dedicated trace type for the _block variant
+    extra->num_processes = count;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = 0;
+    extra->recvcounts = xbt_new(int, count);
+    for (int i = 0; i < count; i++) // copy data to avoid bad free
+      extra->recvcounts[i] = recvcount * dt_size_send;
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * count * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, recvcount * count * datatype->size());
+    }
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    // Delegate to the general reduce_scatter with a uniform counts array.
+    int* recvcounts = static_cast<int*>(xbt_malloc(count * sizeof(int)));
+    for (int i = 0; i < count; i++)
+      recvcounts[i] = recvcount;
+    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+    xbt_free(recvcounts);
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Alltoall user-level entry: every rank sends a distinct fixed-size block to
+// every other rank. MPI_IN_PLACE snapshots recvbuf as the send source.
+int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
+                  MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLTOALL;
+
+    void* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * comm->size() * recvtype->size()));
+      memcpy(sendtmpbuf, recvbuf, recvcount * comm->size() * recvtype->size());
+      sendtmpcount = recvcount;
+      sendtmptype = recvtype;
+    }
+
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    if (known == 0) // unknown datatype: trace bytes; otherwise trace element counts
+      extra->send_size = sendtmpcount * sendtmptype->size();
+    else
+      extra->send_size = sendtmpcount;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    if (known == 0)
+      extra->recv_size = recvcount * recvtype->size();
+    else
+      extra->recv_size = recvcount;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+// MPI_Alltoallv user-level entry: all-to-all with per-peer counts and
+// displacements. MPI_IN_PLACE duplicates recvbuf/recvcounts/recvdisps as the
+// send side; the temporary copies are freed after the collective.
+int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype sendtype, void* recvbuf,
+                   int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
+             recvdisps == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int i = 0;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLTOALLV;
+    extra->send_size = 0;
+    extra->recv_size = 0;
+    extra->recvcounts = xbt_new(int, size);
+    extra->sendcounts = xbt_new(int, size);
+    int known = 0;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = recvtype->size();
+
+    void* sendtmpbuf = static_cast<char*>(sendbuf);
+    int* sendtmpcounts = sendcounts;
+    int* sendtmpdisps = senddisps;
+    MPI_Datatype sendtmptype = sendtype;
+    int maxsize = 0;
+    // maxsize = extent in bytes of the receive region actually addressed.
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->recv_size += recvcounts[i] * dt_size_recv;
+      extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+      if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize)
+        maxsize = (recvdisps[i] + recvcounts[i]) * dt_size_recv;
+    }
+
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(maxsize));
+      memcpy(sendtmpbuf, recvbuf, maxsize);
+      sendtmpcounts = static_cast<int*>(xbt_malloc(size * sizeof(int)));
+      memcpy(sendtmpcounts, recvcounts, size * sizeof(int));
+      sendtmpdisps = static_cast<int*>(xbt_malloc(size * sizeof(int)));
+      memcpy(sendtmpdisps, recvdisps, size * sizeof(int));
+      sendtmptype = recvtype;
+    }
+
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = sendtmptype->size();
+
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->send_size += sendtmpcounts[i] * dt_size_send;
+      extra->sendcounts[i] = sendtmpcounts[i] * dt_size_send;
+    }
+    extra->num_processes = size;
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+    retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
+                                             recvdisps, recvtype, comm);
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE) {
+      xbt_free(sendtmpbuf);
+      xbt_free(sendtmpcounts);
+      xbt_free(sendtmpdisps);
+    }
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <climits>
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+// Return the calling process' rank within comm.
+int PMPI_Comm_rank(MPI_Comm comm, int *rank)
+{
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (rank == nullptr)
+ return MPI_ERR_ARG;
+ *rank = comm->rank();
+ return MPI_SUCCESS;
+}
+
+// Return the number of processes in comm.
+int PMPI_Comm_size(MPI_Comm comm, int *size)
+{
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (size == nullptr)
+ return MPI_ERR_ARG;
+ *size = comm->size();
+ return MPI_SUCCESS;
+}
+
+// Copy comm's name into the caller-provided buffer and report its length.
+int PMPI_Comm_get_name (MPI_Comm comm, char* name, int* len)
+{
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (name == nullptr || len == nullptr)
+ return MPI_ERR_ARG;
+ comm->get_name(name, len);
+ return MPI_SUCCESS;
+}
+
+/* Hand out comm's group, taking one reference on behalf of the caller.
+ * The world group and the NULL/EMPTY sentinels are never refcounted. */
+int PMPI_Comm_group(MPI_Comm comm, MPI_Group * group)
+{
+ if (comm == MPI_COMM_NULL) {
+ return MPI_ERR_COMM;
+ } else if (group == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ *group = comm->group();
+ if (*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY)
+ (*group)->ref();
+ return MPI_SUCCESS;
+ }
+}
+
+// Compare two communicators, writing MPI_IDENT/MPI_CONGRUENT/... into *result.
+int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result)
+{
+ if (comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (result == nullptr)
+ return MPI_ERR_ARG;
+ if (comm1 == comm2) { // same handle -> identical communicators
+ *result = MPI_IDENT;
+ return MPI_SUCCESS;
+ }
+ int group_cmp = comm1->group()->compare(comm2->group());
+ // Distinct communicators over identical groups are only congruent.
+ *result = (group_cmp == MPI_IDENT) ? MPI_CONGRUENT : group_cmp;
+ return MPI_SUCCESS;
+}
+
+// Duplicate comm into *newcomm after validating both arguments.
+int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm * newcomm)
+{
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (newcomm == nullptr)
+ return MPI_ERR_ARG;
+ return comm->dup(newcomm);
+}
+
+/* Create a new communicator from a subset group of comm.
+ * Processes not in the group get MPI_COMM_NULL with MPI_SUCCESS. */
+int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm)
+{
+ if (comm == MPI_COMM_NULL) {
+ return MPI_ERR_COMM;
+ } else if (group == MPI_GROUP_NULL) {
+ return MPI_ERR_GROUP;
+ } else if (newcomm == nullptr) {
+ return MPI_ERR_ARG;
+ } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
+ *newcomm= MPI_COMM_NULL;
+ return MPI_SUCCESS;
+ }else{
+ // The new Comm takes over the reference added here.
+ group->ref();
+ *newcomm = new simgrid::smpi::Comm(group, nullptr);
+ return MPI_SUCCESS;
+ }
+}
+
+// Destroy the communicator and reset the caller's handle to MPI_COMM_NULL.
+int PMPI_Comm_free(MPI_Comm * comm)
+{
+ if (comm == nullptr)
+ return MPI_ERR_ARG;
+ if (*comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ simgrid::smpi::Comm::destroy(*comm);
+ *comm = MPI_COMM_NULL;
+ return MPI_SUCCESS;
+}
+
+// Currently identical to PMPI_Comm_free: destroy and null the handle.
+int PMPI_Comm_disconnect(MPI_Comm * comm)
+{
+ /* TODO: wait until all communication in comm are done */
+ if (comm == nullptr)
+ return MPI_ERR_ARG;
+ if (*comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ simgrid::smpi::Comm::destroy(*comm);
+ *comm = MPI_COMM_NULL;
+ return MPI_SUCCESS;
+}
+
+// Split comm by color/key; benchmarking is suspended around the operation.
+int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out)
+{
+ smpi_bench_end();
+ int retval;
+ if (comm_out == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else {
+ *comm_out = comm->split(color, key);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+// Create a communicator from a group; the tag argument is ignored in SMPI.
+int PMPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int, MPI_Comm* comm_out)
+{
+ smpi_bench_end();
+ int retval;
+ if (comm_out == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else {
+ retval = MPI_Comm_create(comm, group, comm_out);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+// Translate a Fortran communicator handle to its C object.
+MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
+ return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
+}
+
+// Translate a C communicator to its Fortran handle.
+// NOTE(review): comm is dereferenced without a MPI_COMM_NULL check — confirm callers
+// never pass the null handle here.
+MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
+ return comm->c2f();
+}
+
+// MPI-2 attribute entry points: thin wrappers over the MPI-1 Attr_* calls.
+int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
+{
+ return PMPI_Attr_get(comm, comm_keyval, attribute_val,flag);
+}
+
+int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
+{
+ return PMPI_Attr_put(comm, comm_keyval, attribute_val);
+}
+
+int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
+{
+ return PMPI_Attr_delete(comm, comm_keyval);
+}
+
+int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
+ void* extra_state)
+{
+ return PMPI_Keyval_create(copy_fn, delete_fn, keyval, extra_state);
+}
+
+int PMPI_Comm_free_keyval(int* keyval) {
+ return PMPI_Keyval_free(keyval);
+}
+
+// Delete a user attribute from comm; the predefined keyvals cannot be deleted.
+int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
+ bool predefined = keyval == MPI_TAG_UB || keyval == MPI_HOST || keyval == MPI_IO ||
+ keyval == MPI_WTIME_IS_GLOBAL || keyval == MPI_APPNUM ||
+ keyval == MPI_UNIVERSE_SIZE || keyval == MPI_LASTUSEDCODE;
+ if (predefined)
+ return MPI_ERR_ARG;
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ return comm->attr_delete<simgrid::smpi::Comm>(keyval);
+}
+
+/* Fetch an attribute from comm. Predefined keyvals are answered from static
+ * storage (an int* is written through attr_value, per the MPI-1 convention);
+ * everything else is looked up in the communicator's attribute table. */
+int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
+ static int one = 1;
+ static int zero = 0;
+ static int tag_ub = INT_MAX;
+ static int last_used_code = MPI_ERR_LASTCODE;
+
+ if (comm==MPI_COMM_NULL){
+ *flag = 0;
+ return MPI_ERR_COMM;
+ }
+
+ switch (keyval) {
+ case MPI_HOST:
+ case MPI_IO:
+ case MPI_APPNUM:
+ *flag = 1;
+ *static_cast<int**>(attr_value) = &zero;
+ return MPI_SUCCESS;
+ case MPI_UNIVERSE_SIZE:
+ *flag = 1;
+ *static_cast<int**>(attr_value) = &smpi_universe_size;
+ return MPI_SUCCESS;
+ case MPI_LASTUSEDCODE:
+ *flag = 1;
+ *static_cast<int**>(attr_value) = &last_used_code;
+ return MPI_SUCCESS;
+ case MPI_TAG_UB:
+ *flag=1;
+ *static_cast<int**>(attr_value) = &tag_ub;
+ return MPI_SUCCESS;
+ case MPI_WTIME_IS_GLOBAL:
+ *flag = 1;
+ *static_cast<int**>(attr_value) = &one;
+ return MPI_SUCCESS;
+ default:
+ return comm->attr_get<simgrid::smpi::Comm>(keyval, attr_value, flag);
+ }
+}
+
+// Store a user attribute on comm; the predefined keyvals are read-only.
+int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
+ bool predefined = keyval == MPI_TAG_UB || keyval == MPI_HOST || keyval == MPI_IO ||
+ keyval == MPI_WTIME_IS_GLOBAL || keyval == MPI_APPNUM ||
+ keyval == MPI_UNIVERSE_SIZE || keyval == MPI_LASTUSEDCODE;
+ if (predefined)
+ return MPI_ERR_ARG;
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ return comm->attr_put<simgrid::smpi::Comm>(keyval, attr_value);
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/* Drop one reference on the group (unless it is the world or empty sentinel)
+ * and reset the caller's handle.
+ * NOTE(review): *group == MPI_GROUP_NULL is not rejected — confirm unref()
+ * tolerates the null sentinel. */
+int PMPI_Group_free(MPI_Group * group)
+{
+ if (group == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY)
+ simgrid::smpi::Group::unref(*group);
+ *group = MPI_GROUP_NULL;
+ return MPI_SUCCESS;
+ }
+}
+
+// Report the number of processes in the group.
+int PMPI_Group_size(MPI_Group group, int *size)
+{
+ if (group == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (size == nullptr)
+ return MPI_ERR_ARG;
+ *size = group->size();
+ return MPI_SUCCESS;
+}
+
+// Report the calling process' rank in the group (MPI_UNDEFINED if absent).
+int PMPI_Group_rank(MPI_Group group, int *rank)
+{
+ if (group == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (rank == nullptr)
+ return MPI_ERR_ARG;
+ *rank = group->rank(smpi_process()->index());
+ return MPI_SUCCESS;
+}
+
+/* Map each rank of group1 to its rank in group2 via the global process index.
+ * MPI_PROC_NULL passes through unchanged. */
+int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, MPI_Group group2, int *ranks2)
+{
+ if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
+ return MPI_ERR_GROUP;
+ } else {
+ for (int i = 0; i < n; i++) {
+ if(ranks1[i]==MPI_PROC_NULL){
+ ranks2[i]=MPI_PROC_NULL;
+ }else{
+ int index = group1->index(ranks1[i]);
+ ranks2[i] = group2->rank(index);
+ }
+ }
+ return MPI_SUCCESS;
+ }
+}
+
+// Compare two groups, writing the comparison code into *result.
+int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result)
+{
+ if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (result == nullptr)
+ return MPI_ERR_ARG;
+ *result = group1->compare(group2);
+ return MPI_SUCCESS;
+}
+
+// Delegate to Group::group_union after validating arguments.
+int PMPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
+{
+ if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (newgroup == nullptr)
+ return MPI_ERR_ARG;
+ return group1->group_union(group2, newgroup);
+}
+
+// Delegate to Group::intersection after validating arguments.
+int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
+{
+ if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (newgroup == nullptr)
+ return MPI_ERR_ARG;
+ return group1->intersection(group2,newgroup);
+}
+
+// Delegate to Group::difference after validating arguments.
+int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup)
+{
+ if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (newgroup == nullptr)
+ return MPI_ERR_ARG;
+ return group1->difference(group2,newgroup);
+}
+
+// Build a new group from the n listed ranks of group.
+int PMPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup)
+{
+ if (group == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (newgroup == nullptr)
+ return MPI_ERR_ARG;
+ return group->incl(n, ranks, newgroup);
+}
+
+/* Build a new group excluding the n listed ranks. Excluding nothing hands back
+ * the same group (with a reference for non-sentinel groups); excluding everything
+ * yields the empty group. */
+int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup)
+{
+ if (group == MPI_GROUP_NULL) {
+ return MPI_ERR_GROUP;
+ } else if (newgroup == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ if (n == 0) {
+ *newgroup = group;
+ if (group != MPI_COMM_WORLD->group()
+ && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY)
+ group->ref();
+ return MPI_SUCCESS;
+ } else if (n == group->size()) {
+ *newgroup = MPI_GROUP_EMPTY;
+ return MPI_SUCCESS;
+ } else {
+ return group->excl(n,ranks,newgroup);
+ }
+ }
+}
+
+// Build a group from n (first,last,stride) triplets; zero triplets give the empty group.
+int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup)
+{
+ if (group == MPI_GROUP_NULL)
+ return MPI_ERR_GROUP;
+ if (newgroup == nullptr)
+ return MPI_ERR_ARG;
+ if (n == 0) {
+ *newgroup = MPI_GROUP_EMPTY;
+ return MPI_SUCCESS;
+ }
+ return group->range_incl(n,ranges,newgroup);
+}
+
+/* Build a group excluding n (first,last,stride) triplets. Excluding nothing hands
+ * back the same group, taking a reference for non-sentinel groups. */
+int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup)
+{
+ if (group == MPI_GROUP_NULL) {
+ return MPI_ERR_GROUP;
+ } else if (newgroup == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ if (n == 0) {
+ *newgroup = group;
+ if (group != MPI_COMM_WORLD->group() && group != MPI_COMM_SELF->group() &&
+ group != MPI_GROUP_EMPTY)
+ group->ref();
+ return MPI_SUCCESS;
+ } else {
+ return group->range_excl(n,ranges,newgroup);
+ }
+ }
+}
+
+// Fortran <-> C group handle translation.
+MPI_Group PMPI_Group_f2c(MPI_Fint group){
+ return simgrid::smpi::Group::f2c(group);
+}
+
+MPI_Fint PMPI_Group_c2f(MPI_Group group){
+ return group->c2f();
+}
+
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_info.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+// Allocate a fresh, empty Info object for the caller (caller frees via MPI_Info_free).
+int PMPI_Info_create( MPI_Info *info){
+ if (info == nullptr)
+ return MPI_ERR_ARG;
+ *info = new simgrid::smpi::Info();
+ return MPI_SUCCESS;
+}
+
+// Insert or overwrite a (key, value) pair.
+int PMPI_Info_set( MPI_Info info, char *key, char *value){
+ if (info == nullptr || key == nullptr || value == nullptr)
+ return MPI_ERR_ARG;
+ info->set(key, value);
+ return MPI_SUCCESS;
+}
+
+// Drop one reference on the Info object and reset the caller's handle.
+int PMPI_Info_free( MPI_Info *info){
+ if (info == nullptr || *info==nullptr)
+ return MPI_ERR_ARG;
+ simgrid::smpi::Info::unref(*info);
+ *info=MPI_INFO_NULL;
+ return MPI_SUCCESS;
+}
+
+/* Look up key in info, copying at most valuelen bytes into value;
+ * *flag reports whether the key was found. */
+int PMPI_Info_get(MPI_Info info,char *key,int valuelen, char *value, int *flag){
+ if (flag == nullptr) // was dereferenced unconditionally: crashed on flag==nullptr
+ return MPI_ERR_ARG;
+ *flag=false;
+ if (info == nullptr || key == nullptr || valuelen <0)
+ return MPI_ERR_ARG;
+ if (value == nullptr)
+ return MPI_ERR_INFO_VALUE;
+ return info->get(key, valuelen, value, flag);
+}
+
+// Deep-copy an Info object.
+int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
+ if (info == nullptr || newinfo==nullptr)
+ return MPI_ERR_ARG;
+ *newinfo = new simgrid::smpi::Info(info);
+ return MPI_SUCCESS;
+}
+
+// Remove a key (delegates the key-not-found error to Info::remove).
+int PMPI_Info_delete(MPI_Info info, char *key){
+ if (info == nullptr || key==nullptr)
+ return MPI_ERR_ARG;
+ return info->remove(key);
+}
+
+// Report the number of keys currently stored.
+int PMPI_Info_get_nkeys( MPI_Info info, int *nkeys){
+ if (info == nullptr || nkeys==nullptr)
+ return MPI_ERR_ARG;
+ return info->get_nkeys(nkeys);
+}
+
+// Copy the n-th key into the caller's buffer (MPI_MAX_INFO_KEY bounds n's sanity check).
+int PMPI_Info_get_nthkey( MPI_Info info, int n, char *key){
+ if (info == nullptr || key==nullptr || n<0 || n> MPI_MAX_INFO_KEY)
+ return MPI_ERR_ARG;
+ return info->get_nthkey(n, key);
+}
+
+/* Report the length of key's value; *flag says whether the key exists. */
+int PMPI_Info_get_valuelen( MPI_Info info, char *key, int *valuelen, int *flag){
+ if (flag == nullptr) // was dereferenced unconditionally: crashed on flag==nullptr
+ return MPI_ERR_ARG;
+ *flag=false;
+ if (info == nullptr || key == nullptr || valuelen==nullptr)
+ return MPI_ERR_ARG;
+ return info->get_valuelen(key, valuelen, flag);
+}
+
+// Fortran <-> C Info handle translation.
+MPI_Info PMPI_Info_f2c(MPI_Fint info){
+ return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
+}
+
+MPI_Fint PMPI_Info_c2f(MPI_Info info){
+ return info->c2f();
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_op.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+// Wrap a user reduction function in an Op object; commute is a C boolean.
+int PMPI_Op_create(MPI_User_function * function, int commute, MPI_Op * op)
+{
+ if (function == nullptr || op == nullptr)
+ return MPI_ERR_ARG;
+ *op = new simgrid::smpi::Op(function, (commute!=0));
+ return MPI_SUCCESS;
+}
+
+// Release a user-defined operation and reset the handle to MPI_OP_NULL.
+int PMPI_Op_free(MPI_Op * op)
+{
+ if (op == nullptr)
+ return MPI_ERR_ARG;
+ if (*op == MPI_OP_NULL)
+ return MPI_ERR_OP;
+ delete (*op);
+ *op = MPI_OP_NULL;
+ return MPI_SUCCESS;
+}
+
+// Report whether the operation was declared commutative at creation time.
+int PMPI_Op_commutative(MPI_Op op, int* commute){
+ if (op == MPI_OP_NULL)
+ return MPI_ERR_OP;
+ if (commute == nullptr)
+ return MPI_ERR_ARG;
+ *commute = op->is_commutative();
+ return MPI_SUCCESS;
+}
+
+// Fortran <-> C operation handle translation.
+MPI_Op PMPI_Op_f2c(MPI_Fint op){
+ return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
+}
+
+MPI_Fint PMPI_Op_c2f(MPI_Op op){
+ return op->c2f();
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_request.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/* Create a persistent standard-mode send request (started later via MPI_Start). */
+int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if (dst == MPI_PROC_NULL) {
+ // Fix: previously *request was left uninitialized on this success path,
+ // unlike PMPI_Isend which hands back MPI_REQUEST_NULL.
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else {
+ *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request != nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+/* Create a persistent receive request. */
+int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if (src == MPI_PROC_NULL) {
+ // Fix: hand back a defined handle instead of leaving *request uninitialized.
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else {
+ *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request != nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+/* Create a persistent synchronous-mode send request. */
+int PMPI_Ssend_init(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if (dst == MPI_PROC_NULL) {
+ // Fix: hand back a defined handle instead of leaving *request uninitialized.
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else {
+ *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request != nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+// Start a persistent request previously created by one of the *_init calls.
+int PMPI_Start(MPI_Request * request)
+{
+ smpi_bench_end();
+ int retval = MPI_ERR_REQUEST;
+ if (request != nullptr && *request != MPI_REQUEST_NULL) {
+ (*request)->start();
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+// Start every persistent request in the array, refusing if any handle is null.
+int PMPI_Startall(int count, MPI_Request * requests)
+{
+ smpi_bench_end();
+ int retval = MPI_ERR_ARG;
+ if (requests != nullptr) {
+ retval = MPI_SUCCESS;
+ for (int i = 0; i < count; i++)
+ if (requests[i] == MPI_REQUEST_NULL)
+ retval = MPI_ERR_REQUEST;
+ if (retval == MPI_SUCCESS)
+ simgrid::smpi::Request::startall(count, requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+/* Drop the user's reference on a request. */
+int PMPI_Request_free(MPI_Request * request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) { // fix: *request was read without this guard, unlike siblings
+ retval = MPI_ERR_ARG;
+ } else if (*request == MPI_REQUEST_NULL) {
+ retval = MPI_ERR_ARG;
+ } else {
+ simgrid::smpi::Request::unref(request);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+/* Non-blocking receive: validate arguments, trace the posting, and create the
+ * request. MPI_PROC_NULL yields MPI_REQUEST_NULL immediately. */
+int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (src == MPI_PROC_NULL) {
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0) || (buf==nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag<0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int src_traced = comm->group()->index(src);
+
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_IRECV;
+ extra->src = src_traced;
+ extra->dst = rank;
+ int known=0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ // Unknown (derived) datatypes are traced with their real byte size.
+ int dt_size_send = 1;
+ if(known==0)
+ dt_size_send = datatype->size();
+ extra->send_size = count*dt_size_send;
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
+
+ *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request != nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+
+/* Non-blocking standard-mode send: validate, trace, create the request. */
+int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (dst == MPI_PROC_NULL) {
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else if (dst >= comm->group()->size() || dst <0){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0) || (buf==nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag<0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int dst_traced = comm->group()->index(dst);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ISEND;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ int known=0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ int dt_size_send = 1;
+ if(known==0)
+ dt_size_send = datatype->size();
+ extra->send_size = count*dt_size_send;
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
+
+ *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request!=nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+/* Non-blocking synchronous-mode send: same shape as PMPI_Isend with an
+ * ISSEND trace type and Request::issend. */
+int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (request == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (dst == MPI_PROC_NULL) {
+ *request = MPI_REQUEST_NULL;
+ retval = MPI_SUCCESS;
+ } else if (dst >= comm->group()->size() || dst <0){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0)|| (buf==nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag<0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int dst_traced = comm->group()->index(dst);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_ISSEND;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ int known=0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ int dt_size_send = 1;
+ if(known==0)
+ dt_size_send = datatype->size();
+ extra->send_size = count*dt_size_send;
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
+
+ *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ if (retval != MPI_SUCCESS && request!=nullptr)
+ *request = MPI_REQUEST_NULL;
+ return retval;
+}
+
+/* Blocking receive: validate, trace, perform the receive, and re-resolve the
+ * traced source afterwards (it may have been MPI_ANY_SOURCE). */
+int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+ if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (src == MPI_PROC_NULL) {
+ simgrid::smpi::Status::empty(status);
+ // Fix: don't write through the ignore/null sentinel the caller may pass.
+ if (status != MPI_STATUS_IGNORE && status != nullptr)
+ status->MPI_SOURCE = MPI_PROC_NULL;
+ retval = MPI_SUCCESS;
+ } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0) || (buf==nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag<0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int src_traced = comm->group()->index(src);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+ extra->type = TRACING_RECV;
+ extra->src = src_traced;
+ extra->dst = rank;
+ int known = 0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ int dt_size_send = 1;
+ if (known == 0)
+ dt_size_send = datatype->size();
+ extra->send_size = count * dt_size_send;
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
+
+ simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status);
+ retval = MPI_SUCCESS;
+
+ // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
+ if (status != MPI_STATUS_IGNORE) {
+ src_traced = comm->group()->index(status->MPI_SOURCE);
+ if (not TRACE_smpi_view_internals()) {
+ TRACE_smpi_recv(rank, src_traced, rank, tag);
+ }
+ }
+ TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ return retval;
+}
+
+/* Blocking standard-mode send: validate, trace, then perform the send. */
+int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+
+ if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (dst == MPI_PROC_NULL) {
+ retval = MPI_SUCCESS;
+ } else if (dst >= comm->group()->size() || dst <0){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0) || (buf == nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag < 0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int dst_traced = comm->group()->index(dst);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_SEND;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ int known = 0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ // Unknown (derived) datatypes are traced with their real byte size.
+ int dt_size_send = 1;
+ if (known == 0) {
+ dt_size_send = datatype->size();
+ }
+ extra->send_size = count*dt_size_send;
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ if (not TRACE_smpi_view_internals()) {
+ TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
+ }
+
+ simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ return retval;
+}
+
+/* Blocking synchronous-mode send: same shape as PMPI_Send with an SSEND trace
+ * type and Request::ssend. */
+int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
+ int retval = 0;
+
+ smpi_bench_end();
+
+ if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (dst == MPI_PROC_NULL) {
+ retval = MPI_SUCCESS;
+ } else if (dst >= comm->group()->size() || dst <0){
+ retval = MPI_ERR_RANK;
+ } else if ((count < 0) || (buf==nullptr && count > 0)) {
+ retval = MPI_ERR_COUNT;
+ } else if (not datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if(tag<0 && tag != MPI_ANY_TAG){
+ retval = MPI_ERR_TAG;
+ } else {
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int dst_traced = comm->group()->index(dst);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_SSEND;
+ extra->src = rank;
+ extra->dst = dst_traced;
+ int known = 0;
+ extra->datatype1 = encode_datatype(datatype, &known);
+ int dt_size_send = 1;
+ if(known == 0) {
+ dt_size_send = datatype->size();
+ }
+ extra->send_size = count*dt_size_send;
+ TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
+
+ simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+ }
+
+ smpi_bench_begin();
+ return retval;
+}
+
+/* Combined send+receive: validate both directions, trace, and delegate to
+ * Request::sendrecv. */
+int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void *recvbuf,
+ int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status * status)
+{
+ int retval = 0;
+
+ smpi_bench_end();
+
+ if (comm == MPI_COMM_NULL) {
+ retval = MPI_ERR_COMM;
+ } else if (not sendtype->is_valid() || not recvtype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) {
+ simgrid::smpi::Status::empty(status);
+ // Fix: don't write through the ignore/null sentinel the caller may pass.
+ if (status != MPI_STATUS_IGNORE && status != nullptr)
+ status->MPI_SOURCE = MPI_PROC_NULL;
+ retval = MPI_SUCCESS;
+ }else if (dst >= comm->group()->size() || dst <0 ||
+ (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){
+ retval = MPI_ERR_RANK;
+ } else if ((sendcount < 0 || recvcount<0) ||
+ (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) {
+ retval = MPI_ERR_COUNT;
+ } else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
+ retval = MPI_ERR_TAG;
+ } else {
+
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+ int dst_traced = comm->group()->index(dst);
+ int src_traced = comm->group()->index(src);
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_SENDRECV;
+ extra->src = src_traced;
+ extra->dst = dst_traced;
+ int known=0;
+ extra->datatype1 = encode_datatype(sendtype, &known);
+ int dt_size_send = 1;
+ if(known==0)
+ dt_size_send = sendtype->size();
+ extra->send_size = sendcount*dt_size_send;
+ extra->datatype2 = encode_datatype(recvtype, &known);
+ int dt_size_recv = 1;
+ if(known==0)
+ dt_size_recv = recvtype->size();
+ extra->recv_size = recvcount*dt_size_recv;
+
+ TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
+ TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size());
+
+ simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm,
+ status);
+ retval = MPI_SUCCESS;
+
+ TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
+ TRACE_smpi_recv(rank, src_traced, rank, recvtag);
+ }
+
+ smpi_bench_begin();
+ return retval;
+}
+
+/* In-place sendrecv: receive into a temporary buffer, then copy it back over
+ * buf on success. The temporary is freed on every path. */
+int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, int sendtag, int src, int recvtag,
+ MPI_Comm comm, MPI_Status* status)
+{
+ int retval = 0;
+ if (not datatype->is_valid()) {
+ return MPI_ERR_TYPE;
+ } else if (count < 0) {
+ return MPI_ERR_COUNT;
+ } else {
+ // extent (not size) so the scratch buffer covers any datatype holes.
+ int size = datatype->get_extent() * count;
+ void* recvbuf = xbt_new0(char, size);
+ retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status);
+ if(retval==MPI_SUCCESS){
+ simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype);
+ }
+ xbt_free(recvbuf);
+
+ }
+ return retval;
+}
+
+/* Non-blocking completion test. A null request counts as trivially complete
+ * (flag=true, empty status). */
+int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status)
+{
+ int retval = 0;
+ smpi_bench_end();
+ if (request == nullptr || flag == nullptr) {
+ retval = MPI_ERR_ARG;
+ } else if (*request == MPI_REQUEST_NULL) {
+ *flag= true;
+ simgrid::smpi::Status::empty(status);
+ retval = MPI_SUCCESS;
+ } else {
+ int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
+
+ instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+ extra->type = TRACING_TEST;
+ TRACE_smpi_testing_in(rank, extra);
+
+ *flag = simgrid::smpi::Request::test(request,status);
+
+ TRACE_smpi_testing_out(rank);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+/** Non-blocking check whether any of the given requests completed. */
+int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag, MPI_Status * status)
+{
+  smpi_bench_end();
+  int retval;
+  if (index == nullptr || flag == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *flag  = simgrid::smpi::Request::testany(count, requests, index, status);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Non-blocking check whether every request in the array completed. */
+int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* statuses)
+{
+  smpi_bench_end();
+  int retval;
+  if (flag == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    // flag is raised only when all requests have finished
+    *flag  = simgrid::smpi::Request::testall(count, requests, statuses);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Blocking probe for a matching incoming message. */
+int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status* status) {
+  int retval = 0;
+  smpi_bench_end();
+
+  if (status == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (source == MPI_PROC_NULL) {
+    // probing the null process completes immediately with an empty status
+    simgrid::smpi::Status::empty(status);
+    if (status != MPI_STATUS_IGNORE) // never dereference an ignored status
+      status->MPI_SOURCE = MPI_PROC_NULL;
+    retval = MPI_SUCCESS;
+  } else {
+    simgrid::smpi::Request::probe(source, tag, comm, status);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Non-blocking probe for a matching incoming message. */
+int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) {
+  int retval = 0;
+  smpi_bench_end();
+
+  if (flag == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (source == MPI_PROC_NULL) {
+    // the null process always "matches" immediately with an empty status
+    *flag=true;
+    simgrid::smpi::Status::empty(status);
+    if (status != MPI_STATUS_IGNORE) // the caller may legally pass MPI_STATUS_IGNORE here
+      status->MPI_SOURCE = MPI_PROC_NULL;
+    retval = MPI_SUCCESS;
+  } else {
+    simgrid::smpi::Request::iprobe(source, tag, comm, flag, status);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Blocks until the given request completes. Trace data (src/dst/tag/comm) is captured
+ *  *before* the wait, because Request::wait() may invalidate the request. */
+int PMPI_Wait(MPI_Request * request, MPI_Status * status)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  // NOTE(review): called unconditionally, so Status::empty() presumably tolerates
+  // MPI_STATUS_IGNORE -- verify in smpi_status.cpp
+  simgrid::smpi::Status::empty(status);
+
+  if (request == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (*request == MPI_REQUEST_NULL) {
+    // waiting on the null request completes immediately
+    retval = MPI_SUCCESS;
+  } else {
+
+    // NOTE(review): the request!=nullptr test below is redundant, request was checked above
+    int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
+
+    // snapshot tracing info now: these fields are unreachable once the request completes
+    int src_traced = (*request)->src();
+    int dst_traced = (*request)->dst();
+    int tag_traced= (*request)->tag();
+    MPI_Comm comm = (*request)->comm();
+    int is_wait_for_receive = ((*request)->flags() & RECV);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+    extra->type = TRACING_WAIT;
+    TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Request::wait(request, status);
+    retval = MPI_SUCCESS;
+
+    //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
+    TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
+    if (is_wait_for_receive) {
+      if(src_traced==MPI_ANY_SOURCE)
+        src_traced = (status!=MPI_STATUS_IGNORE) ?
+          comm->group()->rank(status->MPI_SOURCE) :
+          src_traced;
+      TRACE_smpi_recv(rank, src_traced, dst_traced, tag_traced);
+    }
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Blocks until one of the requests completes; its index is returned in *index.
+ *  Tracing data is snapshotted before the wait, since waitany() may free the request. */
+int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * status)
+{
+  if (index == nullptr)
+    return MPI_ERR_ARG;
+
+  smpi_bench_end();
+  //save requests information for tracing
+  typedef struct {
+    int src;
+    int dst;
+    int recv;
+    int tag;
+    MPI_Comm comm;
+  } savedvalstype;
+  savedvalstype* savedvals=nullptr;
+  if(count>0){
+    savedvals = xbt_new0(savedvalstype, count);
+  }
+  for (int i = 0; i < count; i++) {
+    MPI_Request req = requests[i]; //already received requests are no longer valid
+    if (req) {
+      savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), req->comm()};
+    }
+  }
+  int rank_traced = smpi_process()->index();
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_WAITANY;
+  extra->send_size=count;
+  TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
+
+  *index = simgrid::smpi::Request::waitany(count, requests, status);
+
+  if(*index!=MPI_UNDEFINED){
+    int src_traced = savedvals[*index].src;
+    //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
+    int dst_traced = savedvals[*index].dst;
+    int is_wait_for_receive = savedvals[*index].recv;
+    if (is_wait_for_receive) {
+      if(savedvals[*index].src==MPI_ANY_SOURCE)
+        // status is a single MPI_Status*, so the sentinel is MPI_STATUS_IGNORE
+        // (was wrongly compared with MPI_STATUSES_IGNORE)
+        src_traced = (status != MPI_STATUS_IGNORE)
+                         ? savedvals[*index].comm->group()->rank(status->MPI_SOURCE)
+                         : savedvals[*index].src;
+      TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[*index].tag);
+    }
+    TRACE_smpi_ptp_out(rank_traced, src_traced, dst_traced, __FUNCTION__);
+  }
+  xbt_free(savedvals);
+
+  smpi_bench_begin();
+  return MPI_SUCCESS;
+}
+
+/** Blocks until every request in the array completes. Per-request tracing data is
+ *  snapshotted before the wait, because waitall() may free the requests. */
+int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
+{
+  smpi_bench_end();
+  //save information from requests
+  typedef struct {
+    int src;
+    int dst;
+    int recv;
+    int tag;
+    int valid;
+    MPI_Comm comm;
+  } savedvalstype;
+  savedvalstype* savedvals=xbt_new0(savedvalstype, count);
+
+  for (int i = 0; i < count; i++) {
+    MPI_Request req = requests[i];
+    if(req!=MPI_REQUEST_NULL){
+      savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), 1, req->comm()};
+    }else{
+      savedvals[i].valid=0;
+    }
+  }
+  int rank_traced = smpi_process()->index();
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_WAITALL;
+  extra->send_size=count;
+  TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
+
+  int retval = simgrid::smpi::Request::waitall(count, requests, status);
+
+  // patch up the traced source of MPI_ANY_SOURCE receives, now that statuses are known
+  for (int i = 0; i < count; i++) {
+    if(savedvals[i].valid){
+      //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
+      int src_traced = savedvals[i].src;
+      int dst_traced = savedvals[i].dst;
+      int is_wait_for_receive = savedvals[i].recv;
+      if (is_wait_for_receive) {
+        // status is an array here, so MPI_STATUSES_IGNORE is the right sentinel
+        if(src_traced==MPI_ANY_SOURCE)
+          src_traced = (status!=MPI_STATUSES_IGNORE) ?
+                          savedvals[i].comm->group()->rank(status[i].MPI_SOURCE) : savedvals[i].src;
+        TRACE_smpi_recv(rank_traced, src_traced, dst_traced,savedvals[i].tag);
+      }
+    }
+  }
+  TRACE_smpi_ptp_out(rank_traced, -1, -1, __FUNCTION__);
+  xbt_free(savedvals);
+
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Blocks until at least one request completes; reports how many did. */
+int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[])
+{
+  smpi_bench_end();
+  int retval;
+  if (outcount == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status);
+    retval    = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Non-blocking variant of Waitsome: reports however many requests completed. */
+int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[])
+{
+  smpi_bench_end();
+  int retval;
+  if (outcount == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status);
+    retval    = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+MPI_Request PMPI_Request_f2c(MPI_Fint request){
+ return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
+}
+
+MPI_Fint PMPI_Request_c2f(MPI_Request request) {
+ return request->c2f();
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/* The topo part of MPI_COMM_WORLD should always be nullptr. When other topologies will be implemented, not only should we
+ * check if the topology is nullptr, but we should check if it is the good topology type (so we have to add a
+ * MPIR_Topo_Type field, and replace the MPI_Topology field by an union)*/
+
+/** Create a Cartesian topology on top of comm_old. */
+int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
+  if (comm_old == MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+  if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr)
+    return MPI_ERR_ARG;
+  // the topology attaches itself to *comm_cart; if no communicator came out, drop it
+  simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
+  if (*comm_cart == MPI_COMM_NULL)
+    delete topo;
+  return MPI_SUCCESS;
+}
+
+int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
+ if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (coords == nullptr) {
+ return MPI_ERR_ARG;
+ }
+ MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+ if (topo==nullptr) {
+ return MPI_ERR_ARG;
+ }
+ return topo->rank(coords, rank);
+}
+
+int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
+ if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (source == nullptr || dest == nullptr || direction < 0 ) {
+ return MPI_ERR_ARG;
+ }
+ MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+ if (topo==nullptr) {
+ return MPI_ERR_ARG;
+ }
+ return topo->shift(direction, displ, source, dest);
+}
+
+int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
+ if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (rank < 0 || rank >= comm->size()) {
+ return MPI_ERR_RANK;
+ }
+ if (maxdims <= 0) {
+ return MPI_ERR_ARG;
+ }
+ if(coords == nullptr) {
+ return MPI_ERR_ARG;
+ }
+ MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+ if (topo==nullptr) {
+ return MPI_ERR_ARG;
+ }
+ return topo->coords(rank, maxdims, coords);
+}
+
+/** Retrieve dims/periods/coords of the Cartesian topology attached to comm. */
+int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+  // compare against MPI_COMM_NULL like every other Cart call, not against a raw nullptr
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if(maxdims <= 0 || dims == nullptr || periods == nullptr || coords == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->get(maxdims, dims, periods, coords);
+}
+
+int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
+ if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+ return MPI_ERR_TOPOLOGY;
+ }
+ if (ndims == nullptr) {
+ return MPI_ERR_ARG;
+ }
+ MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+ if (topo==nullptr) {
+ return MPI_ERR_ARG;
+ }
+ return topo->dim_get(ndims);
+}
+
+/** Compute a balanced factorization of nnodes across ndims dimensions. */
+int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
+  if (dims == nullptr)
+    return MPI_ERR_ARG;
+  if (nnodes < 1 || ndims < 1)
+    return MPI_ERR_DIMS;
+  return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims);
+}
+
+/** Partition the Cartesian topology into lower-dimensional sub-grids (one per caller). */
+int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (comm_new == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology cart = topo->sub(remain_dims, comm_new);
+  // if no communicator came out, the sub-topology is ours to discard;
+  // after the delete, cart is only pointer-compared below, never dereferenced
+  if(*comm_new==MPI_COMM_NULL)
+    delete cart;
+  if(cart==nullptr)
+    return MPI_ERR_ARG;
+  return MPI_SUCCESS;
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_datatype_derived.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/** Release one reference on a user-defined datatype and null out the handle. */
+int PMPI_Type_free(MPI_Datatype * datatype)
+{
+  /* Free a predefined datatype is an error according to the standard, and should be checked for */
+  if (datatype == nullptr || *datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_ARG;
+  } else {
+    simgrid::smpi::Datatype::unref(*datatype);
+    *datatype = MPI_DATATYPE_NULL; // the standard mandates resetting the handle
+    return MPI_SUCCESS;
+  }
+}
+
+/** Return the number of bytes occupied by the data in one element of the type. */
+int PMPI_Type_size(MPI_Datatype datatype, int *size)
+{
+  if (datatype == MPI_DATATYPE_NULL)
+    return MPI_ERR_TYPE;
+  if (size == nullptr)
+    return MPI_ERR_ARG;
+  *size = static_cast<int>(datatype->size());
+  return MPI_SUCCESS;
+}
+
+int PMPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (size == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ *size = static_cast<MPI_Count>(datatype->size());
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (lb == nullptr || extent == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ return datatype->extent(lb, extent);
+ }
+}
+
+int PMPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
+{
+ return PMPI_Type_get_extent(datatype, lb, extent);
+}
+
+int PMPI_Type_extent(MPI_Datatype datatype, MPI_Aint * extent)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (extent == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ *extent = datatype->get_extent();
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Type_lb(MPI_Datatype datatype, MPI_Aint * disp)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (disp == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ *disp = datatype->lb();
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Type_ub(MPI_Datatype datatype, MPI_Aint * disp)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (disp == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ *disp = datatype->ub();
+ return MPI_SUCCESS;
+ }
+}
+
+/** Duplicate a datatype. On failure the freshly allocated copy is released
+ *  and *newtype is reset to MPI_DATATYPE_NULL. */
+int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
+  int retval = MPI_SUCCESS;
+  if (datatype == MPI_DATATYPE_NULL) {
+    retval=MPI_ERR_TYPE;
+  } else {
+    // the constructor reports success/failure through retval
+    *newtype = new simgrid::smpi::Datatype(datatype, &retval);
+    //error when duplicating, free the new datatype
+    if(retval!=MPI_SUCCESS){
+      simgrid::smpi::Datatype::unref(*newtype);
+      *newtype = MPI_DATATYPE_NULL;
+    }
+  }
+  return retval;
+}
+
+int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_type) {
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type);
+ }
+}
+
+/** Mark a datatype as ready for use in communication. */
+int PMPI_Type_commit(MPI_Datatype* datatype) {
+  if (datatype == nullptr || *datatype == MPI_DATATYPE_NULL)
+    return MPI_ERR_TYPE;
+  (*datatype)->commit();
+  return MPI_SUCCESS;
+}
+
+int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0 || blocklen<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type);
+ }
+}
+
+int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0 || blocklen<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
+ }
+}
+
+int PMPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  // stay inside the profiling (PMPI) layer instead of bouncing through MPI_Type_hvector,
+  // mirroring what PMPI_Type_create_hindexed does
+  return PMPI_Type_hvector(count, blocklen, stride, old_type, new_type);
+}
+
+int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+ }
+}
+
+int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+ }
+}
+
+/** Indexed datatype where every block has the same length. */
+int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI_Datatype old_type,
+                                   MPI_Datatype* new_type)
+{
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    // expand the uniform block length into a per-block array;
+    // only `count` entries are needed (was over-allocating blocklength*count)
+    int* blocklens=static_cast<int*>(xbt_malloc(count*sizeof(int)));
+    for (int i = 0; i < count; i++)
+      blocklens[i]=blocklength;
+    int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+    xbt_free(blocklens);
+    return retval;
+  }
+}
+
+int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
+{
+ if (old_type == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (count<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+ }
+}
+
+int PMPI_Type_create_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type,
+ MPI_Datatype* new_type) {
+ return PMPI_Type_hindexed(count, blocklens,indices,old_type,new_type);
+}
+
+/** Hindexed datatype where every block has the same length (byte displacements). */
+int PMPI_Type_create_hindexed_block(int count, int blocklength, MPI_Aint* indices, MPI_Datatype old_type,
+                                    MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    // only `count` entries are needed (was over-allocating blocklength*count)
+    int* blocklens=static_cast<int*>(xbt_malloc(count*sizeof(int)));
+    for (int i = 0; i < count; i++)
+      blocklens[i] = blocklength;
+    int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+    xbt_free(blocklens);
+    return retval;
+  }
+}
+
+int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type) {
+ if (count<0){
+ return MPI_ERR_COUNT;
+ } else {
+ return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type);
+ }
+}
+
+int PMPI_Type_create_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types,
+ MPI_Datatype* new_type) {
+ return PMPI_Type_struct(count, blocklens, indices, old_types, new_type);
+}
+
+/** Create a copy of oldtype with a new lower bound and extent, implemented as a
+ *  struct {MPI_LB, oldtype, MPI_UB}. */
+int PMPI_Type_create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
+  if (oldtype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  }
+  int blocks[3] = {1, 1, 1};
+  MPI_Aint disps[3] = {lb, 0, lb + extent};
+  MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
+
+  *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
+
+  // NOTE(review): addflag(~DT_FLAG_COMMITED) raises every flag *except* COMMITED; if the intent
+  // was merely to clear the COMMITED bit, this looks suspicious -- confirm against Datatype::addflag()
+  (*newtype)->addflag(~DT_FLAG_COMMITED);
+  return MPI_SUCCESS;
+}
+
+
+int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (name == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ datatype->set_name(name);
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
+{
+ if (datatype == MPI_DATATYPE_NULL) {
+ return MPI_ERR_TYPE;
+ } else if (name == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ datatype->get_name(name, len);
+ return MPI_SUCCESS;
+ }
+}
+
+MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){
+ return static_cast<MPI_Datatype>(simgrid::smpi::F2C::f2c(datatype));
+}
+
+MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){
+ return datatype->c2f();
+}
+
+int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
+{
+ if (type==MPI_DATATYPE_NULL)
+ return MPI_ERR_TYPE;
+ else
+ return type->attr_get<simgrid::smpi::Datatype>(type_keyval, attribute_val, flag);
+}
+
+int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
+{
+ if (type==MPI_DATATYPE_NULL)
+ return MPI_ERR_TYPE;
+ else
+ return type->attr_put<simgrid::smpi::Datatype>(type_keyval, attribute_val);
+}
+
+int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
+{
+ if (type==MPI_DATATYPE_NULL)
+ return MPI_ERR_TYPE;
+ else
+ return type->attr_delete<simgrid::smpi::Datatype>(type_keyval);
+}
+
+int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval,
+ void* extra_state)
+{
+ smpi_copy_fn _copy_fn={nullptr,copy_fn,nullptr};
+ smpi_delete_fn _delete_fn={nullptr,delete_fn,nullptr};
+ return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Datatype>(_copy_fn, _delete_fn, keyval, extra_state);
+}
+
+int PMPI_Type_free_keyval(int* keyval) {
+ return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Datatype>(keyval);
+}
+
+int PMPI_Unpack(void* inbuf, int incount, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
+ if(incount<0 || outcount < 0 || inbuf==nullptr || outbuf==nullptr)
+ return MPI_ERR_ARG;
+ if (not type->is_valid())
+ return MPI_ERR_TYPE;
+ if(comm==MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ return type->unpack(inbuf, incount, position, outbuf,outcount, comm);
+}
+
+int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
+ if(incount<0 || outcount < 0|| inbuf==nullptr || outbuf==nullptr)
+ return MPI_ERR_ARG;
+ if (not type->is_valid())
+ return MPI_ERR_TYPE;
+ if(comm==MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ return type->pack(inbuf, incount, outbuf,outcount,position, comm);
+}
+
+/** Upper bound on the space needed to pack incount elements of datatype. */
+int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
+  if(incount<0 || size==nullptr) // guard the output pointer before writing through it
+    return MPI_ERR_ARG;
+  if (not datatype->is_valid())
+    return MPI_ERR_TYPE;
+  if(comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+
+  *size=incount*datatype->size();
+
+  return MPI_SUCCESS;
+}
+
+}
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+#include "smpi_win.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/** Expose a caller-provided memory region as an RMA window over comm. */
+int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
+  smpi_bench_end();
+  int retval;
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0) {
+    retval = MPI_ERR_OTHER;
+  } else {
+    *win   = new simgrid::smpi::Win(base, size, disp_unit, info, comm);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Allocate memory and expose it as an RMA window; the pointer is returned through base. */
+int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
+  int retval = 0;
+  smpi_bench_end();
+  if (comm == MPI_COMM_NULL) {
+    retval= MPI_ERR_COMM;
+  }else if (disp_unit <= 0 || size < 0 ){
+    retval= MPI_ERR_OTHER;
+  }else{
+    void* ptr = xbt_malloc(size);
+    if(ptr==nullptr){
+      retval = MPI_ERR_NO_MEM; // don't return early: smpi_bench_begin() must still run
+    }else{
+      *static_cast<void**>(base) = ptr;
+      *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm,1);
+      retval = MPI_SUCCESS;
+    }
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
+ int retval = 0;
+ smpi_bench_end();
+ if (comm == MPI_COMM_NULL) {
+ retval= MPI_ERR_COMM;
+ }else{
+ *win = new simgrid::smpi::Win(info, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+/** Attach a memory region to a dynamically-created window. */
+int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
+  smpi_bench_end();
+  int retval;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if ((base == nullptr && size != 0) || size < 0) {
+    retval = MPI_ERR_OTHER;
+  } else {
+    retval = win->attach(base, size);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Detach a previously attached memory region from a dynamic window. */
+int PMPI_Win_detach(MPI_Win win, void *base){
+  smpi_bench_end();
+  int retval;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (base == nullptr) {
+    retval = MPI_ERR_OTHER;
+  } else {
+    retval = win->detach(base);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+
+/** Destroy an RMA window and reset the caller's handle. */
+int PMPI_Win_free( MPI_Win* win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == nullptr || *win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  }else{
+    delete *win;
+    *win = MPI_WIN_NULL; // the standard mandates the handle be set to MPI_WIN_NULL
+    retval=MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Attach a debug name to a window. */
+int PMPI_Win_set_name(MPI_Win win, char * name)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_WIN; // was MPI_ERR_TYPE: a null window is a window error, as in PMPI_Win_get_name
+  } else if (name == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    win->set_name(name);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Win_get_name(MPI_Win win, char * name, int* len)
+{
+ if (win == MPI_WIN_NULL) {
+ return MPI_ERR_WIN;
+ } else if (name == nullptr) {
+ return MPI_ERR_ARG;
+ } else {
+ win->get_name(name, len);
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Win_get_info(MPI_Win win, MPI_Info* info)
+{
+ if (win == MPI_WIN_NULL) {
+ return MPI_ERR_WIN;
+ } else {
+ *info = win->info();
+ return MPI_SUCCESS;
+ }
+}
+
+/** Replace the info hints attached to a window. */
+int PMPI_Win_set_info(MPI_Win win, MPI_Info info)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_WIN; // was MPI_ERR_TYPE: a null window is a window error, as in PMPI_Win_get_info
+  } else {
+    win->set_info(info);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){
+ if (win == MPI_WIN_NULL) {
+ return MPI_ERR_WIN;
+ }else {
+ win->get_group(group);
+ (*group)->ref();
+ return MPI_SUCCESS;
+ }
+}
+
+/** Collective synchronization over the window's group; traced as a collective. */
+int PMPI_Win_fence( int assert, MPI_Win win){
+  smpi_bench_end();
+  int retval;
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int rank = smpi_process()->index();
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
+    retval = win->fence(assert);
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** RMA get: copy data from the target's window into origin_addr.
+ *  A MPI_PROC_NULL target is a no-op that succeeds immediately. */
+int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    // map the target rank in the window's group onto a traced process index
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                           target_datatype);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Request-based RMA get: like PMPI_Get but returns a request handle for completion. */
+int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    // must be checked before the MPI_PROC_NULL branch, which dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                           target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** RMA put: copy data from origin_addr into the target's window.
+ *  A MPI_PROC_NULL target is a no-op that succeeds immediately. */
+int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    // a put is traced like a send towards the target
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int dst_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
+    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+
+    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                           target_datatype);
+
+    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Request-based RMA put: like PMPI_Put but returns a request handle for completion. */
+int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    // must be checked before the MPI_PROC_NULL branch, which dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int dst_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
+    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+
+    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                          target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** RMA accumulate: combine origin data into the target's window using op.
+ *  A MPI_PROC_NULL target is a no-op; a null op is rejected. */
+int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    // map the target rank in the window's group onto a traced process index
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                                  target_datatype, op);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Request-based RMA accumulate: like PMPI_Accumulate, but returns an
+ *  MPI_Request via 'request' for local completion.
+ *  Fix: 'request' is validated BEFORE the MPI_PROC_NULL shortcut, which
+ *  writes through it (the original dereferenced a possibly-null pointer). */
+int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                                  target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** RMA get-accumulate: fetch the current target window contents into
+ *  'result_addr' and combine 'origin' data into the target with 'op'.
+ *  With op == MPI_NO_OP this degenerates to an atomic get, which is why a
+ *  null origin_addr is tolerated in that case (see the count check below). */
+int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+             (result_addr==nullptr && result_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
+            (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    // NOTE(review): 'src_traced' is the RMA target; naming kept from the original.
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+                                  result_count, result_datatype, target_rank, target_disp,
+                                  target_count, target_datatype, op);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+
+/** Request-based get-accumulate: like PMPI_Get_accumulate, but returns an
+ *  MPI_Request via 'request' for local completion.
+ *  Fix: 'request' is validated BEFORE the MPI_PROC_NULL shortcut, which
+ *  writes through it (the original dereferenced a possibly-null pointer). */
+int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+             (result_addr==nullptr && result_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
+            (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+                                  result_count, result_datatype, target_rank, target_disp,
+                                  target_count, target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Fetch-and-op, expressed as a 1-element Get_accumulate. A null origin_addr
+ *  maps to origin_count 0, which matches the MPI_NO_OP (pure fetch) use case. */
+int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
+  return PMPI_Get_accumulate(origin_addr, origin_addr==nullptr?0:1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win);
+}
+
+/** Atomic compare-and-swap of one 'datatype' element in the target window:
+ *  fetch the target value into result_addr and replace it with *origin_addr
+ *  iff it equals *compare_addr. */
+int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
+  void *result_addr, MPI_Datatype datatype, int target_rank,
+  MPI_Aint target_disp, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){
+    // NOTE(review): MPI_ERR_COUNT for null buffers is debatable (MPI_ERR_ARG
+    // would be more conventional) — kept as-is for consistency with callers.
+    retval = MPI_ERR_COUNT;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype,
+                                  target_rank, target_disp);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Open an RMA exposure epoch on 'win' for the processes listed in 'group'
+ *  (post/start/complete/wait active-target synchronization). */
+int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
+  int ret = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    ret = MPI_ERR_WIN;
+  } else if (group == MPI_GROUP_NULL) {
+    ret = MPI_ERR_GROUP;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    ret = win->post(group, assert);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return ret;
+}
+
+/** Open an RMA access epoch on 'win' targeting the processes in 'group'
+ *  (counterpart of PMPI_Win_post on the origin side). */
+int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){
+  int ret = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    ret = MPI_ERR_WIN;
+  } else if (group == MPI_GROUP_NULL) {
+    ret = MPI_ERR_GROUP;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    ret = win->start(group, assert);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return ret;
+}
+
+/** Close the access epoch opened by PMPI_Win_start on this window. */
+int PMPI_Win_complete(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int rank = smpi_process()->index();
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
+
+    retval = win->complete();
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Close the exposure epoch opened by PMPI_Win_post on this window,
+ *  blocking until all matching accesses have completed. */
+int PMPI_Win_wait(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int rank = smpi_process()->index();
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
+
+    retval = win->wait();
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Begin a passive-target access epoch on 'win' at process 'rank'.
+ *  lock_type must be MPI_LOCK_EXCLUSIVE or MPI_LOCK_SHARED. */
+int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (lock_type != MPI_LOCK_EXCLUSIVE &&
+      lock_type != MPI_LOCK_SHARED) {
+    retval = MPI_ERR_LOCKTYPE;
+  } else if (rank == MPI_PROC_NULL){
+    // locking MPI_PROC_NULL is a successful no-op
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->lock(lock_type,rank,assert);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** End the passive-target access epoch opened by PMPI_Win_lock at 'rank'. */
+int PMPI_Win_unlock(int rank, MPI_Win win){
+  int ret = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    ret = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL) {
+    // unlocking MPI_PROC_NULL is a successful no-op
+    ret = MPI_SUCCESS;
+  } else {
+    int me = smpi_process()->index();
+    TRACE_smpi_collective_in(me, -1, __FUNCTION__, nullptr);
+    ret = win->unlock(rank);
+    TRACE_smpi_collective_out(me, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return ret;
+}
+
+/** Begin a shared passive-target access epoch to ALL processes of 'win'. */
+int PMPI_Win_lock_all(int assert, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->lock_all(assert);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** End the epoch opened by PMPI_Win_lock_all on this window. */
+int PMPI_Win_unlock_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->unlock_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Complete all outstanding RMA operations initiated by this process and
+ *  targeting 'rank' on 'win' (both at origin and at target). */
+int PMPI_Win_flush(int rank, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL){
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush(rank);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Complete, locally only, outstanding RMA operations targeting 'rank'. */
+int PMPI_Win_flush_local(int rank, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL){
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local(rank);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Complete all outstanding RMA operations to every target of 'win'. */
+int PMPI_Win_flush_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+/** Locally complete all outstanding RMA operations to every target of 'win'. */
+int PMPI_Win_flush_local_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+
+/** Query a predefined (MPI_WIN_BASE/SIZE/DISP_UNIT) or user attribute of a
+ *  window. Predefined numeric attributes are returned by address, so they
+ *  live in static storage (NOTE: shared across calls, not thread-safe).
+ *  Fix: an invalid window now yields MPI_ERR_WIN instead of MPI_ERR_TYPE
+ *  (copy-paste from the datatype attribute functions); brace layout tidied. */
+int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag)
+{
+  static MPI_Aint size;
+  static int disp_unit;
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_WIN;
+  else{
+    switch (keyval) {
+      case MPI_WIN_BASE :
+        *static_cast<void**>(attribute_val) = win->base();
+        *flag = 1;
+        return MPI_SUCCESS;
+      case MPI_WIN_SIZE :
+        size = win->size();
+        *static_cast<MPI_Aint**>(attribute_val) = &size;
+        *flag = 1;
+        return MPI_SUCCESS;
+      case MPI_WIN_DISP_UNIT :
+        disp_unit=win->disp_unit();
+        *static_cast<int**>(attribute_val) = &disp_unit;
+        *flag = 1;
+        return MPI_SUCCESS;
+      default:
+        return win->attr_get<simgrid::smpi::Win>(keyval, attribute_val, flag);
+    }
+  }
+}
+
+/** Attach a user attribute value to a window under 'type_keyval'.
+ *  Fix: an invalid window now yields MPI_ERR_WIN instead of MPI_ERR_TYPE. */
+int PMPI_Win_set_attr (MPI_Win win, int type_keyval, void *attribute_val)
+{
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_WIN;
+  else
+    return win->attr_put<simgrid::smpi::Win>(type_keyval, attribute_val);
+}
+
+/** Remove the attribute stored under 'type_keyval' from a window.
+ *  Fix: an invalid window now yields MPI_ERR_WIN instead of MPI_ERR_TYPE. */
+int PMPI_Win_delete_attr (MPI_Win win, int type_keyval)
+{
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_WIN;
+  else
+    return win->attr_delete<simgrid::smpi::Win>(type_keyval);
+}
+
+/** Create a window attribute keyval. The copy/delete callbacks are stored in
+ *  the third slot of the smpi_copy_fn/smpi_delete_fn unions (the Win slot). */
+int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval,
+                            void* extra_state)
+{
+  smpi_copy_fn _copy_fn={nullptr, nullptr, copy_fn};
+  smpi_delete_fn _delete_fn={nullptr, nullptr, delete_fn};
+  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Win>(_copy_fn, _delete_fn, keyval, extra_state);
+}
+
+/** Free a window attribute keyval previously created by PMPI_Win_create_keyval. */
+int PMPI_Win_free_keyval(int* keyval) {
+  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Win>(keyval);
+}
+
+/** Convert a Fortran window handle to its C counterpart. */
+MPI_Win PMPI_Win_f2c(MPI_Fint win){
+  return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
+}
+
+/** Convert a C window handle to its Fortran counterpart.
+ *  NOTE(review): no MPI_WIN_NULL guard here, unlike the other entry points —
+ *  passing a null handle would dereference it; confirm callers never do. */
+MPI_Fint PMPI_Win_c2f(MPI_Win win){
+  return win->c2f();
+}
+
+}
//SMPI_RMA_TAG has to be the smallest one, as it will be decremented for accumulate ordering.
#define SMPI_RMA_TAG -6666
+/* Convert between Fortran and C */
+
+#define FORT_BOTTOM(addr) ((*(int*)addr) == -200 ? MPI_BOTTOM : (void*)addr)
+#define FORT_IN_PLACE(addr) ((*(int*)addr) == -100 ? MPI_IN_PLACE : (void*)addr)
+#define FORT_STATUS_IGNORE(addr) (static_cast<MPI_Status*>((*(int*)addr) == -300 ? MPI_STATUS_IGNORE : (void*)addr))
+#define FORT_STATUSES_IGNORE(addr) (static_cast<MPI_Status*>((*(int*)addr) == -400 ? MPI_STATUSES_IGNORE : (void*)addr))
+
extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED;
typedef SMPI_Cart_topology *MPIR_Cart_Topology;
void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr);
void mpi_group_incl_(int* group, int* n, int* key, int* group_out, int* ierr) ;
void mpi_comm_group_(int* comm, int* group_out, int* ierr);
+void mpi_comm_create_group_ (int* comm, int* group, int, int* comm_out, int* ierr);
void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr);
void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr);
void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr);
void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr );
void mpi_type_commit_(int* datatype, int* ierr);
void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr);
-void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr);
void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr);
void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr);
void mpi_type_free_(int* datatype, int* ierr);
void mpi_win_start_(int* group, int assert, int* win, int* ierr);
void mpi_win_complete_(int* win, int* ierr);
void mpi_win_wait_(int* win, int* ierr);
+void mpi_win_allocate_( MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr);
+void mpi_win_attach_(int* win, int* base, MPI_Aint* size, int* ierr);
+void mpi_win_create_dynamic_( int* info, int* comm, int *win, int* ierr);
+void mpi_win_detach_(int* win, int* base, int* ierr);
+void mpi_win_set_info_(int* win, int* info, int* ierr);
+void mpi_win_get_info_(int* win, int* info, int* ierr);
+void mpi_win_get_group_(int* win, int* group, int* ierr);
+void mpi_win_get_attr_(int* win, int* type_keyval, void* attribute_val, int* flag, int* ierr);
+void mpi_win_set_attr_(int* win, int* type_keyval, void* att, int* ierr);
+void mpi_win_delete_attr_(int* win, int* comm_keyval, int* ierr);
+void mpi_win_create_keyval_(void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr);
+void mpi_win_free_keyval_(int* keyval, int* ierr);
+void mpi_win_lock_(int* lock_type, int* rank, int* assert, int* win, int* ierr);
+void mpi_win_lock_all_(int* assert, int* win, int* ierr);
+void mpi_win_unlock_(int* rank, int* win, int* ierr);
+void mpi_win_unlock_all_(int* win, int* ierr);
+void mpi_win_flush_(int* rank, int* win, int* ierr);
+void mpi_win_flush_local_(int* rank, int* win, int* ierr);
+void mpi_win_flush_all_(int* win, int* ierr);
+void mpi_win_flush_local_all_(int* win, int* ierr);
void mpi_info_create_( int *info, int* ierr);
void mpi_info_set_( int *info, char *key, char *value, int* ierr, unsigned int keylen, unsigned int valuelen);
void mpi_info_free_(int* info, int* ierr);
MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* ierr);
void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank,
MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* ierr);
+void mpi_rget_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank,
+ MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* request, int* ierr);
+void mpi_rput_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank,
+ MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* request, int* ierr);
+void mpi_fetch_and_op_( int *origin_addr, int* result_addr, int* datatype, int* target_rank, MPI_Aint* target_disp, int* op, int* win, int* ierr);
+void mpi_compare_and_swap_( int *origin_addr, int* compare_addr, int* result_addr,
+ int* datatype, int* target_rank, MPI_Aint* target_disp, int* win, int* ierr);
+void mpi_get_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+ int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+ int* target_datatype, int* op, int* win, int* ierr);
+void mpi_rget_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+ int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+ int* target_datatype, int* op, int* win, int* request, int* ierr);
void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank,
MPI_Aint* target_disp, int* target_count, int* target_datatype, int* op, int* win, int* ierr);
+void mpi_raccumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank,
+ MPI_Aint* target_disp, int* target_count, int* target_datatype, int* op, int* win, int* request, int* ierr);
void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr);
void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount,
int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr);
void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr);
void mpi_op_create_ (void * function, int* commute, int* op, int* ierr);
void mpi_op_free_ (int* op, int* ierr);
+void mpi_op_commutative_ (int* op, int* commute, int* ierr);
void mpi_group_free_ (int* group, int* ierr);
void mpi_group_size_ (int* group, int *size, int* ierr);
void mpi_group_rank_ (int* group, int *rank, int* ierr);
void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype,
int* ierr) ;
void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) ;
+void mpi_type_create_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) ;
void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype,
int* ierr);
void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) ;
if (PJ_container_get_root() == nullptr){
PJ_container_alloc ();
- PJ_type_alloc();
container_t root = PJ_container_new (id, INSTR_AS, nullptr);
PJ_container_set_root (root);
add_executable(winaccf winaccf.f)
# add_executable(winerrf winerrf.f)
add_executable(winfencef winfencef.f)
-# add_executable(wingroupf wingroupf.f)
+ add_executable(wingroupf wingroupf.f)
# add_executable(baseattrwinf baseattrwinf.f)
# add_executable(winattr2f winattr2f.f)
# add_executable(winattrf winattrf.f)
target_link_libraries(winaccf simgrid mtest_f77)
#target_link_libraries(winerrf simgrid mtest_f77)
target_link_libraries(winfencef simgrid mtest_f77)
-#target_link_libraries(wingroupf simgrid mtest_f77)
+target_link_libraries(wingroupf simgrid mtest_f77)
#target_link_libraries(baseattrwinf simgrid mtest_f77)
target_link_libraries(c2f2cwinf simgrid mtest_f77)
#target_link_libraries(winattr2f simgrid mtest_f77)
${CMAKE_CURRENT_SOURCE_DIR}/winscale1f.f
${CMAKE_CURRENT_SOURCE_DIR}/winscale2f.f
${CMAKE_CURRENT_SOURCE_DIR}/addsize.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/attraints.h
PARENT_SCOPE)
set(txt_files
${txt_files}
--- /dev/null
+C -*- Mode: Fortran; -*-
+C
+C (C) 2003 by Argonne National Laboratory.
+C See COPYRIGHT in top-level directory.
+C
+ integer extrastate, valin, valout, val
#Needs win error handling
#winerrf 1
winnamef 1
-#Needs win get group
-#wingroupf 4
+wingroupf 4 needs_privatization=1
winaccf 4 needs_privatization=1
-#Needs mpi_win_f2c
c2f2cwinf 1
-#Needs attr
#baseattrwinf 1
#winattrf 1
#winattr2f 1
add_executable(winaccf90 winaccf90.f90)
# add_executable(winerrf90 winerrf90.f90)
add_executable(winfencef90 winfencef90.f90)
-# add_executable(wingroupf90 wingroupf90.f90)
+ add_executable(wingroupf90 wingroupf90.f90)
# add_executable(baseattrwinf90 baseattrwinf90.f90)
# add_executable(winattr2f90 winattr2f90.f90)
# add_executable(winattrf90 winattrf90.f90)
target_link_libraries(winaccf90 simgrid mtest_f90)
#target_link_libraries(winerrf90 simgrid mtest_f90)
target_link_libraries(winfencef90 simgrid mtest_f90)
-#target_link_libraries(wingroupf90 simgrid mtest_f90)
+target_link_libraries(wingroupf90 simgrid mtest_f90)
#target_link_libraries(baseattrwinf90 simgrid mtest_f90)
target_link_libraries(c2f2cwinf90 simgrid mtest_f90)
#target_link_libraries(winattr2f90 simgrid mtest_f90)
#winscale2f90 4
#winerrf90 1
winnamef90 1
-#wingroupf90 4
+wingroupf90 4 needs_privatization=1
winaccf90 4 needs_privatization=1
c2f2cwinf90 1
#baseattrwinf90 1
src/smpi/smpi_main.c
src/smpi/bindings/smpi_mpi.cpp
src/smpi/bindings/smpi_pmpi.cpp
+ src/smpi/bindings/smpi_pmpi_coll.cpp
+ src/smpi/bindings/smpi_pmpi_comm.cpp
+ src/smpi/bindings/smpi_pmpi_group.cpp
+ src/smpi/bindings/smpi_pmpi_info.cpp
+ src/smpi/bindings/smpi_pmpi_op.cpp
+ src/smpi/bindings/smpi_pmpi_request.cpp
+ src/smpi/bindings/smpi_pmpi_topo.cpp
+ src/smpi/bindings/smpi_pmpi_type.cpp
+ src/smpi/bindings/smpi_pmpi_win.cpp
src/smpi/bindings/smpi_f77.cpp
+ src/smpi/bindings/smpi_f77_coll.cpp
+ src/smpi/bindings/smpi_f77_comm.cpp
+ src/smpi/bindings/smpi_f77_request.cpp
+ src/smpi/bindings/smpi_f77_type.cpp
src/smpi/colls/allgather/allgather-2dmesh.cpp
src/smpi/colls/allgather/allgather-3dmesh.cpp
src/smpi/colls/allgather/allgather-GB.cpp
fun:malloc
...
fun:dlopen@@GLIBC_*
- ...
- fun:main
}
{
fun:calloc
...
fun:dlopen@@GLIBC_*
- ...
- fun:main
}
# Memory leaks appearing to be in libcgraph. They can be seen with the