From: Martin Quinson
Date: Tue, 18 Jul 2017 00:31:29 +0000 (+0200)
Subject: Merge branch 'master' of github.com:simgrid/simgrid
X-Git-Tag: v3_17~368
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/43f7ca1cac5ab1858e318fdd6239d0a0c3b3d893?hp=d33e7a563a884247bff85406dcc589a70a162e79

Merge branch 'master' of github.com:simgrid/simgrid
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7dea4f7bd7..19a9267fe4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -167,6 +167,10 @@ set(INCLUDES
 ${CMAKE_HOME_DIRECTORY}/src/include
 )
 
+if(enable_smpi)
+  set (INCLUDES ${INCLUDES} ${CMAKE_HOME_DIRECTORY}/src/smpi/include)
+endif()
+
 if(NOT CMAKE_CROSSCOMPILING AND EXISTS /usr/include/)
   set(INCLUDES ${INCLUDES} /usr/include/)
 endif()
diff --git a/include/simgrid/forward.h b/include/simgrid/forward.h
index 21826783fe..c3c0cbc7f1 100644
--- a/include/simgrid/forward.h
+++ b/include/simgrid/forward.h
@@ -117,26 +117,25 @@ typedef tmgr_Trace *tmgr_trace_t; /**< Opaque structure defining an availability
 typedef struct s_smx_simcall s_smx_simcall_t;
 typedef struct s_smx_simcall* smx_simcall_t;
 
-typedef enum {
+typedef enum { // FIXME: move this to s4u::Link; make it an enum class
   SURF_LINK_FULLDUPLEX = 2,
-  SURF_LINK_SHARED = 1,
-  SURF_LINK_FATPIPE = 0
+  SURF_LINK_SHARED     = 1,
+  SURF_LINK_FATPIPE    = 0
 } e_surf_link_sharing_policy_t;
 
-typedef enum {
+typedef enum { // FIXME: move this away; make it an enum class
   SURF_TRACE_CONNECT_KIND_HOST_AVAIL = 4,
-  SURF_TRACE_CONNECT_KIND_SPEED = 3,
+  SURF_TRACE_CONNECT_KIND_SPEED      = 3,
   SURF_TRACE_CONNECT_KIND_LINK_AVAIL = 2,
-  SURF_TRACE_CONNECT_KIND_BANDWIDTH = 1,
-  SURF_TRACE_CONNECT_KIND_LATENCY = 0
+  SURF_TRACE_CONNECT_KIND_BANDWIDTH  = 1,
+  SURF_TRACE_CONNECT_KIND_LATENCY    = 0
 } e_surf_trace_connect_kind_t;
 
-typedef enum {
-  SURF_ACTOR_ON_FAILURE_DIE = 1,
+typedef enum { // FIXME: move this to s4u::Actor; make it an enum class
+  SURF_ACTOR_ON_FAILURE_DIE     = 1,
   SURF_ACTOR_ON_FAILURE_RESTART = 0
 } e_surf_process_on_failure_t;
 
-
 /** @ingroup m_datatypes_management_details
  * @brief Type for any simgrid size */
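
The three FIXME notes in this hunk all ask for the same cleanup: turn these C enums
into scoped enumerations owned by the matching S4U class. A minimal sketch of the
intended shape, with hypothetical placement and names -- not code from this commit:

  namespace simgrid {
  namespace s4u {
  class Link {
  public:
    // Scoped enum: the values no longer leak into the global namespace,
    // and there is no implicit conversion to int.
    enum class SharingPolicy { FATPIPE = 0, SHARED = 1, FULLDUPLEX = 2 };
  };
  }
  }
  // Usage: simgrid::s4u::Link::SharingPolicy::SHARED
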
diff --git a/include/smpi/mpif.h.in b/include/smpi/mpif.h.in
index d6ffa2f444..8af2cdf881 100644
--- a/include/smpi/mpif.h.in
+++ b/include/smpi/mpif.h.in
@@ -82,6 +82,11 @@
       integer MPI_COMM_NULL_DUP_FN, MPI_COMM_DUP_FN
       parameter(MPI_COMM_NULL_DUP_FN =0)
       parameter(MPI_COMM_DUP_FN =0)
+      integer MPI_WIN_NULL_COPY_FN, MPI_WIN_NULL_DELETE_FN
+      parameter(MPI_WIN_NULL_COPY_FN =0)
+      parameter(MPI_WIN_NULL_DELETE_FN =0)
+      integer MPI_WIN_DUP_FN
+      parameter(MPI_WIN_DUP_FN =0)
 
       integer MPI_ROOT, MPI_COMM_TYPE_SHARED
       parameter(MPI_ROOT=0)
@@ -153,6 +158,12 @@
       parameter(MPI_GROUP_EMPTY=-2)
       parameter(MPI_WIN_NULL=-1)
+      integer MPI_WIN_BASE, MPI_WIN_SIZE, MPI_WIN_DISP_UNIT
+
+      parameter(MPI_WIN_BASE=-1)
+      parameter(MPI_WIN_SIZE=-2)
+      parameter(MPI_WIN_DISP_UNIT=-3)
+
 ! These IDs have to be unique, consecutive
 ! and ordered as in smpi_f77.cpp.
       parameter(MPI_COMM_WORLD=0)
diff --git a/src/instr/instr_paje_trace.cpp b/src/instr/instr_paje_trace.cpp
index 9a0a954a35..cfaf78f6fe 100644
--- a/src/instr/instr_paje_trace.cpp
+++ b/src/instr/instr_paje_trace.cpp
@@ -6,7 +6,7 @@
 
 #include "src/instr/instr_private.h"
 #include "src/instr/instr_smpi.h"
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.hpp"
 #include "typeinfo"
 #include "xbt/virtu.h" /* sg_cmdline */
 #include "simgrid/sg_config.h"
diff --git a/src/mc/ObjectInformation.hpp b/src/mc/ObjectInformation.hpp
index 21ed47157a..019e75baf5 100644
--- a/src/mc/ObjectInformation.hpp
+++ b/src/mc/ObjectInformation.hpp
@@ -19,7 +19,7 @@
 #include "src/mc/Type.hpp"
 #include "src/mc/Frame.hpp"
 
-#include "src/smpi/private.h"
+#include "src/smpi/include/private.h"
 
 namespace simgrid {
 namespace mc {
diff --git a/src/mc/Session.cpp b/src/mc/Session.cpp
index 566858b8ad..d38c528217 100644
--- a/src/mc/Session.cpp
+++ b/src/mc/Session.cpp
@@ -20,7 +20,7 @@
 #include "src/mc/mc_private.h"
 #include "src/mc/checker/Checker.hpp"
 
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_Session, mc, "Model-checker session");
diff --git a/src/mc/checker/CommunicationDeterminismChecker.cpp b/src/mc/checker/CommunicationDeterminismChecker.cpp
index cb723fb60e..fad3f3ade6 100644
--- a/src/mc/checker/CommunicationDeterminismChecker.cpp
+++ b/src/mc/checker/CommunicationDeterminismChecker.cpp
@@ -21,7 +21,7 @@
 #include "src/mc/mc_state.h"
 #include "src/mc/remote/Client.hpp"
 
-#include "src/smpi/smpi_request.hpp"
+#include "smpi_request.hpp"
 
 using simgrid::mc::remote;
diff --git a/src/mc/compare.cpp b/src/mc/compare.cpp
index 1fe48f4e3a..fabd66bd2f 100644
--- a/src/mc/compare.cpp
+++ b/src/mc/compare.cpp
@@ -25,8 +25,8 @@
 #include "src/xbt/mmalloc/mmprivate.h"
 
 #if HAVE_SMPI
-#include "src/smpi/private.h"
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.h"
+#include "src/smpi/include/private.hpp"
 #endif
 
 #include "src/mc/mc_forward.hpp"
diff --git a/src/mc/mc_base.cpp b/src/mc/mc_base.cpp
index e567c57397..90fedffd0a 100644
--- a/src/mc/mc_base.cpp
+++ b/src/mc/mc_base.cpp
@@ -33,17 +33,16 @@ namespace mc {
 void wait_for_requests()
 {
 #if SIMGRID_HAVE_MC
-  xbt_assert(mc_model_checker == nullptr);
+  xbt_assert(mc_model_checker == nullptr, "This must be called from the client");
 #endif
 
   smx_actor_t process;
-  smx_simcall_t req;
   unsigned int iter;
 
   while (not xbt_dynar_is_empty(simix_global->process_to_run)) {
     SIMIX_process_runall();
     xbt_dynar_foreach(simix_global->process_that_ran, iter, process) {
-      req = &process->simcall;
+      smx_simcall_t req = &process->simcall;
       if (req->call != SIMCALL_NONE && not simgrid::mc::request_is_visible(req))
         SIMIX_simcall_handle(req, 0);
     }
   }
@@ -70,8 +69,15 @@ void wait_for_requests()
 
 // Called from both MCer and MCed:
 bool actor_is_enabled(smx_actor_t actor)
 {
+#if SIMGRID_HAVE_MC
+  // If in the MCer, ask the client app since it has all the data
+  if (mc_model_checker != nullptr) {
+    return mc_model_checker->process().actor_is_enabled(actor->pid);
+  }
+#endif
+
+  // Now, we are in the client app, no need for remote memory reading.
   smx_simcall_t req = &actor->simcall;
-  // TODO, add support for the subtypes?
   switch (req->call) {
     case SIMCALL_NONE:
@@ -82,16 +88,6 @@ bool actor_is_enabled(smx_actor_t actor)
       simgrid::kernel::activity::CommImpl* act =
           static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
 
-#if SIMGRID_HAVE_MC
-      // Fetch from MCed memory:
-      // HACK, type punning
-      if (mc_model_checker != nullptr) {
-        simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
-        mc_model_checker->process().read(temp_comm, remote(act));
-        act = static_cast<simgrid::kernel::activity::CommImpl*>(temp_comm.getBuffer());
-      }
-#endif
-
       if (act->src_timeout || act->dst_timeout) {
         /* If it has a timeout it will always be enabled (regardless of who declared the timeout),
          * because even if the communication is not ready, it can timeout and won't block. */
@@ -109,38 +105,10 @@ bool actor_is_enabled(smx_actor_t actor)
       simgrid::kernel::activity::CommImpl* act =
          static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
 
-#if SIMGRID_HAVE_MC
-      s_xbt_dynar_t comms_buffer;
-      size_t buffer_size = 0;
-      if (mc_model_checker != nullptr) {
-        // Read dynar:
-        mc_model_checker->process().read(&comms_buffer, remote(simcall_comm_waitany__get__comms(req)));
-        assert(comms_buffer.elmsize == sizeof(act));
-        buffer_size = comms_buffer.elmsize * comms_buffer.used;
-        comms = &comms_buffer;
-      } else
-        comms = simcall_comm_waitany__get__comms(req);
-
-      // Read all the dynar buffer:
-      char buffer[buffer_size];
-      if (mc_model_checker != nullptr)
-        mc_model_checker->process().read_bytes(buffer, sizeof(buffer), remote(comms->data));
-#else
       comms = simcall_comm_waitany__get__comms(req);
-#endif
 
       for (unsigned int index = 0; index < comms->used; ++index) {
-#if SIMGRID_HAVE_MC
-        // Fetch act from MCed memory:
-        // HACK, type punning
-        simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
-        if (mc_model_checker != nullptr) {
-          memcpy(&act, buffer + comms->elmsize * index, sizeof(act));
-          mc_model_checker->process().read(temp_comm, remote(act));
-          act = static_cast<simgrid::kernel::activity::CommImpl*>(temp_comm.getBuffer());
-        } else
-#endif
-          act = xbt_dynar_get_as(comms, index, simgrid::kernel::activity::CommImpl*);
+        act = xbt_dynar_get_as(comms, index, simgrid::kernel::activity::CommImpl*);
         if (act->src_proc && act->dst_proc)
           return true;
       }
@@ -149,24 +117,9 @@ bool actor_is_enabled(smx_actor_t actor)
 
     case SIMCALL_MUTEX_LOCK: {
       smx_mutex_t mutex = simcall_mutex_lock__get__mutex(req);
-#if SIMGRID_HAVE_MC
-      simgrid::mc::Remote<simgrid::simix::MutexImpl> temp_mutex;
-      if (mc_model_checker != nullptr) {
-        mc_model_checker->process().read(temp_mutex.getBuffer(), remote(mutex));
-        mutex = temp_mutex.getBuffer();
-      }
-#endif
 
       if (mutex->owner == nullptr)
         return true;
-#if SIMGRID_HAVE_MC
-      else if (mc_model_checker != nullptr) {
-        simgrid::mc::RemoteClient& modelchecked = mc_model_checker->process();
-        // TODO, *(mutex->owner) :/
-        return modelchecked.resolveActor(simgrid::mc::remote(mutex->owner))->pid ==
-               modelchecked.resolveActor(simgrid::mc::remote(req->issuer))->pid;
-      }
-#endif
       else
         return mutex->owner->pid == req->issuer->pid;
     }
@@ -193,8 +146,15 @@ bool actor_is_enabled(smx_actor_t actor)
   }
 }
 
+/* This is the list of requests that are visible from the checker algorithm.
+ * Any other requests are handled right away on the application side.
+ */
 bool request_is_visible(smx_simcall_t req)
 {
+#if SIMGRID_HAVE_MC
+  xbt_assert(mc_model_checker == nullptr, "This should be called from the client side");
+#endif
+
   return req->call == SIMCALL_COMM_ISEND
       || req->call == SIMCALL_COMM_IRECV
       || req->call == SIMCALL_COMM_WAIT
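
The mc_base.cpp hunks above invert the old control flow: instead of the model checker
(MCer) reconstructing application state through remote reads of MCed memory,
actor_is_enabled() now forwards the question to the client over the control channel
and trusts its local answer. The round trip can be illustrated with a self-contained
toy version of the protocol -- plain sockets and made-up message types, much simpler
than the real Channel class:

  #include <sys/socket.h>
  #include <unistd.h>
  #include <cassert>
  #include <cstdint>

  enum MessageType : uint32_t { ACTOR_ENABLED, ACTOR_ENABLED_REPLY };
  struct QueryMsg { MessageType type; long aid; };
  struct ReplyMsg { MessageType type; int value; };

  int main()
  {
    int fd[2];
    assert(socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fd) == 0);

    // "MCer" side: ask whether actor 42 is enabled.
    QueryMsg q{ACTOR_ENABLED, 42};
    send(fd[0], &q, sizeof(q), 0);

    // "MCed" side: compute the answer locally and reply.
    QueryMsg in;
    recv(fd[1], &in, sizeof(in), 0);
    ReplyMsg r{ACTOR_ENABLED_REPLY, in.aid == 42};  // stand-in for actor_is_enabled()
    send(fd[1], &r, sizeof(r), 0);

    // "MCer" side: read the reply and use it.
    ReplyMsg out;
    recv(fd[0], &out, sizeof(out), 0);
    assert(out.value == 1);

    close(fd[0]);
    close(fd[1]);
    return 0;
  }
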
diff --git a/src/mc/mc_base.h b/src/mc/mc_base.h
index 54a209bf57..798dbbab41 100644
--- a/src/mc/mc_base.h
+++ b/src/mc/mc_base.h
@@ -39,10 +39,7 @@ XBT_PRIVATE void handle_simcall(smx_simcall_t req, int req_num);
  */
 XBT_PRIVATE bool actor_is_enabled(smx_actor_t process);
 
-/** Check if the given simcall is visible
- *
- * \return `TRUE` or `FALSE`
- */
+/** Check if the given simcall is visible */
 XBT_PRIVATE bool request_is_visible(smx_simcall_t req);
 
 }
diff --git a/src/mc/mc_checkpoint.cpp b/src/mc/mc_checkpoint.cpp
index be90588f20..9f7775c812 100644
--- a/src/mc/mc_checkpoint.cpp
+++ b/src/mc/mc_checkpoint.cpp
@@ -17,7 +17,7 @@
 
 #include "src/internal_config.h"
 #include "src/mc/mc_private.h"
-#include "src/smpi/private.h"
+#include "src/smpi/include/private.h"
 #include "xbt/mmalloc.h"
 #include "xbt/module.h"
diff --git a/src/mc/mc_snapshot.cpp b/src/mc/mc_snapshot.cpp
index a79de0e12c..e365f98543 100644
--- a/src/mc/mc_snapshot.cpp
+++ b/src/mc/mc_snapshot.cpp
@@ -13,7 +13,7 @@
 #include "xbt/sysdep.h"
 
 #include "src/internal_config.h"
-#include "src/smpi/private.h"
+#include "src/smpi/include/private.h"
 #include "src/mc/mc_snapshot.h"
 #include "src/mc/mc_private.h"
diff --git a/src/mc/remote/Client.cpp b/src/mc/remote/Client.cpp
index dae03e7076..9bfc523a19 100644
--- a/src/mc/remote/Client.cpp
+++ b/src/mc/remote/Client.cpp
@@ -23,7 +23,7 @@
 #include "src/mc/remote/Client.hpp"
 #include "src/mc/remote/mc_protocol.h"
 
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.hpp"
 
 // We won't need those once the separation MCer/MCed is complete:
 #include "src/mc/mc_smx.h"
@@ -118,6 +118,12 @@ void Client::handleRestore(s_mc_message_restore_t* message)
   smpi_really_switch_data_segment(message->index);
 #endif
 }
+void Client::handleActorEnabled(s_mc_message_actor_enabled_t* msg)
+{
+  bool res = simgrid::mc::actor_is_enabled(SIMIX_process_from_PID(msg->aid));
+  s_mc_message_int answer{MC_MESSAGE_ACTOR_ENABLED_REPLY, res};
+  channel_.send(answer);
+}
 
 void Client::handleMessages()
 {
@@ -158,6 +164,13 @@ void Client::handleMessages()
         handleRestore((s_mc_message_restore_t*)message_buffer);
         break;
 
+      case MC_MESSAGE_ACTOR_ENABLED:
+        xbt_assert(received_size == sizeof(s_mc_message_actor_enabled_t),
+                   "Unexpected size for ACTOR_ENABLED (%zu != %zu)", received_size,
+                   sizeof(s_mc_message_actor_enabled_t));
+        handleActorEnabled((s_mc_message_actor_enabled_t*)message_buffer);
+        break;
+
       default:
         xbt_die("Received unexpected message %s (%i)", MC_message_type_name(message->type), message->type);
         break;
diff --git a/src/mc/remote/Client.hpp b/src/mc/remote/Client.hpp
index b9059a3229..7770b65324 100644
--- a/src/mc/remote/Client.hpp
+++ b/src/mc/remote/Client.hpp
@@ -42,6 +42,7 @@ private:
   void handleContinue(mc_message_t* msg);
   void handleSimcall(s_mc_message_simcall_handle_t* message);
   void handleRestore(s_mc_message_restore_t* msg);
+  void handleActorEnabled(s_mc_message_actor_enabled_t* msg);
 
 public:
   Channel const& getChannel() const { return channel_; }
diff --git a/src/mc/remote/RemoteClient.cpp b/src/mc/remote/RemoteClient.cpp
index 6046e09772..f96e1e5ab9 100644
--- a/src/mc/remote/RemoteClient.cpp
+++ b/src/mc/remote/RemoteClient.cpp
@@ -673,5 +673,15 @@ void RemoteClient::dumpStack()
   unw_destroy_addr_space(as);
   return;
 }
+
+bool RemoteClient::actor_is_enabled(aid_t pid)
+{
+  s_mc_message_actor_enabled msg{MC_MESSAGE_ACTOR_ENABLED, pid};
+  process()->getChannel().send(msg);
+  char buff[MC_MESSAGE_LENGTH];
+  ssize_t received = process()->getChannel().receive(buff, MC_MESSAGE_LENGTH, true);
+  xbt_assert(received == sizeof(s_mc_message_int), "Unexpected size in answer to ACTOR_ENABLED");
+  return ((mc_message_int_t*)buff)->value;
+}
 }
 }
diff --git a/src/mc/remote/RemoteClient.hpp b/src/mc/remote/RemoteClient.hpp
index 34c31b698f..2fc858674e 100644
--- a/src/mc/remote/RemoteClient.hpp
+++ b/src/mc/remote/RemoteClient.hpp
@@ -302,6 +302,9 @@ public: // Libunwind-data
 
   /** The corresponding context */
   void* unw_underlying_context;
+
+  /* Check whether the given actor is enabled */
+  bool actor_is_enabled(aid_t pid);
 };
 
 /** Open a FD to a remote process memory (`/dev/$pid/mem`)
diff --git a/src/mc/remote/mc_protocol.cpp b/src/mc/remote/mc_protocol.cpp
index 5baf1d568b..74bec4e710 100644
--- a/src/mc/remote/mc_protocol.cpp
+++ b/src/mc/remote/mc_protocol.cpp
@@ -46,6 +46,12 @@ const char* MC_message_type_name(e_mc_message_type type)
       return "SIMCALL_HANDLE";
     case MC_MESSAGE_ASSERTION_FAILED:
       return "ASSERTION_FAILED";
+
+    case MC_MESSAGE_ACTOR_ENABLED:
+      return "ACTOR_ENABLED";
+    case MC_MESSAGE_ACTOR_ENABLED_REPLY:
+      return "ACTOR_ENABLED_REPLY";
+
     default:
       return "?";
   }
diff --git a/src/mc/remote/mc_protocol.h b/src/mc/remote/mc_protocol.h
index 4ca6653ad7..bac7981555 100644
--- a/src/mc/remote/mc_protocol.h
+++ b/src/mc/remote/mc_protocol.h
@@ -11,6 +11,7 @@
 #include <xbt/base.h>
 
 #include "mc/datatypes.h"
+#include "simgrid/forward.h"
 
 SG_BEGIN_DECL()
@@ -41,6 +42,8 @@ typedef enum {
   MC_MESSAGE_ASSERTION_FAILED,
   // MCer request to finish the restoration:
   MC_MESSAGE_RESTORE,
+  MC_MESSAGE_ACTOR_ENABLED,
+  MC_MESSAGE_ACTOR_ENABLED_REPLY
 } e_mc_message_type;
 
 #define MC_MESSAGE_LENGTH 512
@@ -112,6 +115,12 @@ struct s_mc_message_restore {
 };
 typedef struct s_mc_message_restore s_mc_message_restore_t;
 
+struct s_mc_message_actor_enabled {
+  e_mc_message_type type;
+  aid_t aid; // actor ID
+};
+typedef struct s_mc_message_actor_enabled s_mc_message_actor_enabled_t;
+
 XBT_PRIVATE const char* MC_message_type_name(e_mc_message_type type);
 
 SG_END_DECL()
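
Since these message structs travel as raw bytes through the channel (note the
(mc_message_int_t*)buff cast in RemoteClient::actor_is_enabled above), both
processes must agree on their exact layout. A guard of the following kind could
express that invariant -- hypothetical code, not part of this commit, and the
struct here is a local stand-in:

  #include <type_traits>

  struct s_mc_message_actor_enabled { int type; long aid; };  // stand-in definition

  static_assert(std::is_trivially_copyable<s_mc_message_actor_enabled>::value,
                "messages are copied byte-for-byte through a socket");
  static_assert(sizeof(s_mc_message_actor_enabled) <= 512,  // MC_MESSAGE_LENGTH
                "messages must fit in the fixed-size receive buffer");
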
diff --git a/src/simix/ActorImpl.cpp b/src/simix/ActorImpl.cpp
index 4d2b7ba979..f9b0f73f6b 100644
--- a/src/simix/ActorImpl.cpp
+++ b/src/simix/ActorImpl.cpp
@@ -31,7 +31,7 @@
 #include "src/surf/surf_interface.hpp"
 
 #ifdef HAVE_SMPI
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.hpp"
 #endif
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(simix_process, simix, "Logging specific to SIMIX (process)");
diff --git a/src/simix/smx_global.cpp b/src/simix/smx_global.cpp
index 68d64655d5..f0f826872f 100644
--- a/src/simix/smx_global.cpp
+++ b/src/simix/smx_global.cpp
@@ -25,7 +25,7 @@
 #include "src/mc/mc_replay.h"
 #include "src/surf/StorageImpl.hpp"
 
-#include "src/smpi/smpi_process.hpp"
+#include "src/smpi/include/smpi_process.hpp"
 
 #include "src/kernel/activity/CommImpl.hpp"
 #include "src/kernel/activity/ExecImpl.hpp"
@@ -43,8 +43,8 @@
 #include "src/mc/mc_record.h"
 
 #if HAVE_SMPI
-#include "src/smpi/private.h"
-#include "src/smpi/private.hpp"
+#include "src/smpi/include/private.h"
+#include "src/smpi/include/private.hpp"
 #endif
 
 XBT_LOG_NEW_CATEGORY(simix, "All SIMIX categories");
diff --git a/src/smpi/bindings/smpi_f77.cpp b/src/smpi/bindings/smpi_f77.cpp
new file mode 100644
index 0000000000..ad40a6b49e
--- /dev/null
+++ b/src/smpi/bindings/smpi_f77.cpp
@@ -0,0 +1,868 @@
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+#include "smpi_request.hpp"
+#include "smpi_win.hpp"
+
+static int running_processes = 0;
+
+static void smpi_init_fortran_types(){
+  if(simgrid::smpi::F2C::lookup() == nullptr){
+    MPI_COMM_WORLD->add_f();
+    MPI_BYTE->add_f();//MPI_BYTE
+    MPI_CHAR->add_f();//MPI_CHARACTER
+#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__)
+    MPI_C_BOOL->add_f();//MPI_LOGICAL
+    MPI_INT->add_f();//MPI_INTEGER
+#else
+    MPI_C_BOOL->add_f();//MPI_LOGICAL
+    MPI_LONG->add_f();//MPI_INTEGER
+#endif
+    MPI_INT8_T->add_f();//MPI_INTEGER1
+    MPI_INT16_T->add_f();//MPI_INTEGER2
+    MPI_INT32_T->add_f();//MPI_INTEGER4
+    MPI_INT64_T->add_f();//MPI_INTEGER8
+    MPI_REAL->add_f();//MPI_REAL
+    MPI_REAL4->add_f();//MPI_REAL4
+    MPI_REAL8->add_f();//MPI_REAL8
+    MPI_DOUBLE->add_f();//MPI_DOUBLE_PRECISION
+    MPI_C_FLOAT_COMPLEX->add_f();//MPI_COMPLEX
+    MPI_C_DOUBLE_COMPLEX->add_f();//MPI_DOUBLE_COMPLEX
+#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__)
+    MPI_2INT->add_f();//MPI_2INTEGER
+#else
+    MPI_2LONG->add_f();//MPI_2INTEGER
+#endif
+    MPI_UINT8_T->add_f();//MPI_LOGICAL1
+    MPI_UINT16_T->add_f();//MPI_LOGICAL2
+    MPI_UINT32_T->add_f();//MPI_LOGICAL4
+    MPI_UINT64_T->add_f();//MPI_LOGICAL8
+    MPI_2FLOAT->add_f();//MPI_2REAL
+    MPI_2DOUBLE->add_f();//MPI_2DOUBLE_PRECISION
+    MPI_PTR->add_f();//MPI_AINT
+    MPI_OFFSET->add_f();//MPI_OFFSET
+    MPI_AINT->add_f();//MPI_COUNT
+    MPI_REAL16->add_f();//MPI_REAL16
+    MPI_PACKED->add_f();//MPI_PACKED
+
+    MPI_MAX->add_f();
+    MPI_MIN->add_f();
+    MPI_MAXLOC->add_f();
+    MPI_MINLOC->add_f();
+    MPI_SUM->add_f();
+    MPI_PROD->add_f();
+    MPI_LAND->add_f();
+    MPI_LOR->add_f();
+    MPI_LXOR->add_f();
+    MPI_BAND->add_f();
+    MPI_BOR->add_f();
+    MPI_BXOR->add_f();
+  }
+}
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+
+void mpi_init_(int* ierr) {
+  smpi_init_fortran_types();
+  *ierr = MPI_Init(nullptr, nullptr);
+  running_processes++;
+}
+
+void mpi_finalize_(int* ierr) {
+  *ierr = MPI_Finalize();
+  running_processes--;
+  if(running_processes==0){
+    simgrid::smpi::F2C::delete_lookup();
+  }
+}
+
+void mpi_abort_(int* comm, int* errorcode, int* ierr) {
+  *ierr = MPI_Abort(simgrid::smpi::Comm::f2c(*comm), *errorcode);
+}
+
+double mpi_wtime_() {
+  return MPI_Wtime();
+}
+
+double mpi_wtick_() {
+  return MPI_Wtick();
+}
+
+void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) {
+  MPI_Group tmp;
+
+  *ierr = MPI_Group_incl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *group_out = tmp->add_f();
+  }
+}
+
+void mpi_initialized_(int* flag, int* ierr){
+  *ierr = MPI_Initialized(flag);
+}
+
+void mpi_get_processor_name_(char *name, int *resultlen, int* ierr){
+  *ierr = MPI_Get_processor_name(name, resultlen);
+}
+
+void mpi_get_count_(MPI_Status * status, int* datatype, int *count, int* ierr){
+  *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), simgrid::smpi::Datatype::f2c(*datatype), count);
+}
+
+void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr ){
+  *ierr = MPI_Attr_get(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value, flag);
+}
+void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){
+  *ierr = MPI_Error_string(*errorcode, string, resultlen);
+}
+
+void mpi_win_fence_( int* assert, int* win, int* ierr){
+  *ierr = MPI_Win_fence(* assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_free_( int* win, int* ierr){
+  MPI_Win tmp = simgrid::smpi::Win::f2c(*win);
+  *ierr = MPI_Win_free(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::F2C::free_f(*win);
+  }
+}
+
+void mpi_win_create_( int *base, MPI_Aint* size, int* disp_unit, int* info, int* comm, int *win, int* ierr){
+  MPI_Win tmp;
+  *ierr = MPI_Win_create( static_cast<void*>(base), *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *win = tmp->add_f();
+  }
+}
+
+void mpi_win_post_(int* group, int assert, int* win, int* ierr){
+  *ierr = MPI_Win_post(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_start_(int* group, int assert, int* win, int* ierr){
+  *ierr = MPI_Win_start(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_complete_(int* win, int* ierr){
+  *ierr = MPI_Win_complete(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_wait_(int* win, int* ierr){
+  *ierr = MPI_Win_wait(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_set_name_ (int* win, char * name, int* ierr, int size){
+  //handle trailing blanks
+  while(name[size-1]==' ')
+    size--;
+  while(*name==' '){//handle leading blanks
+    size--;
+    name++;
+  }
+  char* tname = xbt_new(char,size+1);
+  strncpy(tname, name, size);
+  tname[size]='\0';
+  *ierr = MPI_Win_set_name(simgrid::smpi::Win::f2c(*win), tname);
+  xbt_free(tname);
+}
+
+void mpi_win_get_name_ (int* win, char * name, int* len, int* ierr){
+  *ierr = MPI_Win_get_name(simgrid::smpi::Win::f2c(*win),name,len);
+  if(*len>0)
+    name[*len]=' ';//blank padding, not \0
+}
+
+void mpi_win_allocate_( MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr){
+  MPI_Win tmp;
+  *ierr = MPI_Win_allocate( *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),static_cast<void*>(base),&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *win = tmp->add_f();
+  }
+}
+
+void mpi_win_attach_(int* win, int* base, MPI_Aint* size, int* ierr){
+  *ierr = MPI_Win_attach(simgrid::smpi::Win::f2c(*win), static_cast<void*>(base), *size);
+}
+
+void mpi_win_create_dynamic_( int* info, int* comm, int *win, int* ierr){
+  MPI_Win tmp;
+  *ierr = MPI_Win_create_dynamic( simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *win = tmp->add_f();
+  }
+}
+
+void mpi_win_detach_(int* win, int* base, int* ierr){
+  *ierr = MPI_Win_detach(simgrid::smpi::Win::f2c(*win), static_cast<void*>(base));
+}
+
+void mpi_win_set_info_(int* win, int* info, int* ierr){
+  *ierr = MPI_Win_set_info(simgrid::smpi::Win::f2c(*win), simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_win_get_info_(int* win, int* info, int* ierr){
+  MPI_Info tmp;
+  *ierr = MPI_Win_get_info(simgrid::smpi::Win::f2c(*win), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *info = tmp->add_f();
+  }
+}
+
+void mpi_win_get_group_(int* win, int* group, int* ierr){
+  MPI_Group tmp;
+  *ierr = MPI_Win_get_group(simgrid::smpi::Win::f2c(*win), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *group = tmp->add_f();
+  }
+}
+
+void mpi_win_get_attr_(int* win, int* type_keyval, void* attribute_val, int* flag, int* ierr){
+  *ierr = MPI_Win_get_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, attribute_val, flag);
+}
+void mpi_win_set_attr_(int* win, int* type_keyval, void* att, int* ierr){
+  *ierr = MPI_Win_set_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, att);
+}
+
+void mpi_win_delete_attr_(int* win, int* comm_keyval, int* ierr){
+  *ierr = MPI_Win_delete_attr (simgrid::smpi::Win::f2c(*win), *comm_keyval);
+}
+
+void mpi_win_create_keyval_(void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+  *ierr = MPI_Win_create_keyval(reinterpret_cast<MPI_Win_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Win_delete_attr_function*>(delete_fn),
+                                keyval, extra_state) ;
+}
+
+void mpi_win_free_keyval_(int* keyval, int* ierr){
+  *ierr = MPI_Win_free_keyval( keyval);
+}
+
+void mpi_win_lock_(int* lock_type, int* rank, int* assert, int* win, int* ierr){
+  *ierr = MPI_Win_lock(*lock_type, *rank, *assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_lock_all_(int* assert, int* win, int* ierr){
+  *ierr = MPI_Win_lock_all(*assert, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_unlock_(int* rank, int* win, int* ierr){
+  *ierr = MPI_Win_unlock(*rank, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_unlock_all_(int* win, int* ierr){
+  *ierr = MPI_Win_unlock_all(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_(int* rank, int* win, int* ierr){
+  *ierr = MPI_Win_flush(*rank, simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_local_(int* rank, int* win, int* ierr){
+  *ierr = MPI_Win_flush_local(*rank, simgrid::smpi::Win::f2c(*win));
+}
+void mpi_win_flush_all_(int* win, int* ierr){
+  *ierr = MPI_Win_flush_all(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_win_flush_local_all_(int* win, int* ierr){
+  *ierr = MPI_Win_flush_local_all(simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_info_create_( int *info, int* ierr){
+  MPI_Info tmp;
+  *ierr = MPI_Info_create(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *info = tmp->add_f();
+  }
+}
+
+void mpi_info_set_( int *info, char *key, char *value, int* ierr, unsigned int keylen, unsigned int valuelen){
+  //handle trailing blanks
+  while(key[keylen-1]==' ')
+    keylen--;
+  while(*key==' '){//handle leading blanks
+    keylen--;
+    key++;
+  }
+  char* tkey = xbt_new(char,keylen+1);
+  strncpy(tkey, key, keylen);
+  tkey[keylen]='\0';
+
+  while(value[valuelen-1]==' ')
+    valuelen--;
+  while(*value==' '){//handle leading blanks
+    valuelen--;
+    value++;
+  }
+  char* tvalue = xbt_new(char,valuelen+1);
+  strncpy(tvalue, value, valuelen);
+  tvalue[valuelen]='\0';
+
+  *ierr = MPI_Info_set( simgrid::smpi::Info::f2c(*info), tkey, tvalue);
+  xbt_free(tkey);
+  xbt_free(tvalue);
+}
+
+void mpi_info_get_ (int* info,char *key,int* valuelen, char *value, int *flag, int* ierr, unsigned int keylen ){
+  while(key[keylen-1]==' ')
+    keylen--;
+  while(*key==' '){//handle leading blanks
+    keylen--;
+    key++;
+  }
+  char* tkey = xbt_new(char,keylen+1);
+  strncpy(tkey, key, keylen);
+  tkey[keylen]='\0';
+  *ierr = MPI_Info_get(simgrid::smpi::Info::f2c(*info),tkey,*valuelen, value, flag);
+  xbt_free(tkey);
+  if(*flag!=0){
+    int replace=0;
+    int i=0;
+    for (i=0; i<*valuelen; i++){
+      if(value[i]=='\0')
+        replace=1;
+      if(replace)
+        value[i]=' ';
+    }
+  }
+}
+
+void mpi_info_free_(int* info, int* ierr){
+  MPI_Info tmp = simgrid::smpi::Info::f2c(*info);
+  *ierr = MPI_Info_free(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::F2C::free_f(*info);
+  }
+}
+void mpi_get_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){
+  *ierr = MPI_Get( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_rget_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* request, int* ierr){
+  MPI_Request req;
+  *ierr = MPI_Rget( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* ierr){
+  *ierr = MPI_Accumulate( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_raccumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* request, int* ierr){
+  MPI_Request req;
+  *ierr = MPI_Raccumulate( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win),&req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){
+  *ierr = MPI_Put( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_rput_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
+    MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* request, int* ierr){
+  MPI_Request req;
+  *ierr = MPI_Rput( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+      *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win),&req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_fetch_and_op_( int *origin_addr, int* result_addr, int* datatype, int* target_rank, MPI_Aint* target_disp, int* op, int* win, int* ierr){
+  *ierr = MPI_Fetch_and_op( static_cast<void*>(origin_addr),
+      static_cast<void*>(result_addr), simgrid::smpi::Datatype::f2c(*datatype),*target_rank,
+      *target_disp, simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_compare_and_swap_( int *origin_addr, int* compare_addr, int* result_addr,
+    int* datatype, int* target_rank, MPI_Aint* target_disp, int* win, int* ierr){
+  *ierr = MPI_Compare_and_swap( static_cast<void*>(origin_addr),static_cast<void*>(compare_addr),
+      static_cast<void*>(result_addr), simgrid::smpi::Datatype::f2c(*datatype),*target_rank,
+      *target_disp, simgrid::smpi::Win::f2c(*win));
+}
+void mpi_get_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+    int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+    int* target_datatype, int* op, int* win, int* ierr){
+  *ierr = MPI_Get_accumulate( static_cast<void*>(origin_addr), *origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),
+      static_cast<void*>(result_addr), *result_count, simgrid::smpi::Datatype::f2c(*result_datatype),
+      *target_rank, *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*target_datatype),
+      simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
+}
+
+void mpi_rget_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr,
+    int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count,
+    int* target_datatype, int* op, int* win, int* request, int* ierr){
+  MPI_Request req;
+  *ierr = MPI_Rget_accumulate( static_cast<void*>(origin_addr), *origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),
+      static_cast<void*>(result_addr), *result_count, simgrid::smpi::Datatype::f2c(*result_datatype),
+      *target_rank, *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*target_datatype),
+      simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+//following are automatically generated, and have to be checked
+void mpi_finalized_ (int * flag, int* ierr){
+
+  *ierr = MPI_Finalized(flag);
+}
+
+void mpi_init_thread_ (int* required, int *provided, int* ierr){
+  smpi_init_fortran_types();
+  *ierr = MPI_Init_thread(nullptr, nullptr,*required, provided);
+  running_processes++;
+}
+
+void mpi_query_thread_ (int *provided, int* ierr){
+
+  *ierr = MPI_Query_thread(provided);
+}
+
+void mpi_is_thread_main_ (int *flag, int* ierr){
+
+  *ierr = MPI_Is_thread_main(flag);
+}
+
+void mpi_address_ (void *location, MPI_Aint * address, int* ierr){
+
+  *ierr = MPI_Address(location, address);
+}
+
+void mpi_get_address_ (void *location, MPI_Aint * address, int* ierr){
+
+  *ierr = MPI_Get_address(location, address);
+}
+
+void mpi_pcontrol_ (int* level , int* ierr){
+  *ierr = MPI_Pcontrol(*static_cast<const int*>(level));
+}
+
+void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){
+  MPI_Op tmp;
+  *ierr = MPI_Op_create(reinterpret_cast<MPI_User_function*>(function),*commute, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    tmp->set_fortran_op();
+    *op = tmp->add_f();
+  }
+}
+
+void mpi_op_free_ (int* op, int* ierr){
+  MPI_Op tmp= simgrid::smpi::Op::f2c(*op);
+  *ierr = MPI_Op_free(& tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::F2C::free_f(*op);
+  }
+}
+
+void mpi_op_commutative_ (int* op, int* commute, int* ierr){
+  *ierr = MPI_Op_commutative(simgrid::smpi::Op::f2c(*op), commute);
+}
+
+void mpi_group_free_ (int* group, int* ierr){
+  MPI_Group tmp = simgrid::smpi::Group::f2c(*group);
+  *ierr = MPI_Group_free(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::F2C::free_f(*group);
+  }
+}
+
+void mpi_group_size_ (int* group, int *size, int* ierr){
+
+  *ierr = MPI_Group_size(simgrid::smpi::Group::f2c(*group), size);
+}
+
+void mpi_group_rank_ (int* group, int *rank, int* ierr){
+
+  *ierr = MPI_Group_rank(simgrid::smpi::Group::f2c(*group), rank);
+}
+
+void mpi_group_translate_ranks_ (int* group1, int* n, int *ranks1, int* group2, int *ranks2, int* ierr)
+{
+
+  *ierr = MPI_Group_translate_ranks(simgrid::smpi::Group::f2c(*group1), *n, ranks1, simgrid::smpi::Group::f2c(*group2), ranks2);
+}
+
+void mpi_group_compare_ (int* group1, int* group2, int *result, int* ierr){
+
+  *ierr = MPI_Group_compare(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), result);
+}
+void mpi_group_union_ (int* group1, int* group2, int* newgroup, int* ierr){
+  MPI_Group tmp;
+  *ierr = MPI_Group_union(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_group_intersection_ (int* group1, int* group2, int* newgroup, int* ierr){
+  MPI_Group tmp;
+  *ierr = MPI_Group_intersection(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_group_difference_ (int* group1, int* group2, int* newgroup, int* ierr){
+  MPI_Group tmp;
+  *ierr = MPI_Group_difference(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_group_excl_ (int* group, int* n, int *ranks, int* newgroup, int* ierr){
+  MPI_Group tmp;
+  *ierr = MPI_Group_excl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_group_range_incl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr)
+{
+  MPI_Group tmp;
+  *ierr = MPI_Group_range_incl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_group_range_excl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr)
+{
+  MPI_Group tmp;
+  *ierr = MPI_Group_range_excl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newgroup = tmp->add_f();
+  }
+}
+
+void mpi_request_free_ (int* request, int* ierr){
+  MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
+  *ierr = MPI_Request_free(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::Request::free_f(*request);
+  }
+}
+
+void mpi_pack_size_ (int* incount, int* datatype, int* comm, int* size, int* ierr) {
+  *ierr = MPI_Pack_size(*incount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Comm::f2c(*comm), size);
+}
+
+void mpi_cart_coords_ (int* comm, int* rank, int* maxdims, int* coords, int* ierr) {
+  *ierr = MPI_Cart_coords(simgrid::smpi::Comm::f2c(*comm), *rank, *maxdims, coords);
+}
+
+void mpi_cart_create_ (int* comm_old, int* ndims, int* dims, int* periods, int* reorder, int* comm_cart, int* ierr) {
+  MPI_Comm tmp;
+  *ierr = MPI_Cart_create(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_cart = tmp->add_f();
+  }
+}
+
+void mpi_cart_get_ (int* comm, int* maxdims, int* dims, int* periods, int* coords, int* ierr) {
+  *ierr = MPI_Cart_get(simgrid::smpi::Comm::f2c(*comm), *maxdims, dims, periods, coords);
+}
+
+void mpi_cart_map_ (int* comm_old, int* ndims, int* dims, int* periods, int* newrank, int* ierr) {
+  *ierr = MPI_Cart_map(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, newrank);
+}
+
+void mpi_cart_rank_ (int* comm, int* coords, int* rank, int* ierr) {
+  *ierr = MPI_Cart_rank(simgrid::smpi::Comm::f2c(*comm), coords, rank);
+}
+
+void mpi_cart_shift_ (int* comm, int* direction, int* displ, int* source, int* dest, int* ierr) {
+  *ierr = MPI_Cart_shift(simgrid::smpi::Comm::f2c(*comm), *direction, *displ, source, dest);
+}
+
+void mpi_cart_sub_ (int* comm, int* remain_dims, int* comm_new, int* ierr) {
+  MPI_Comm tmp;
+  *ierr = MPI_Cart_sub(simgrid::smpi::Comm::f2c(*comm), remain_dims, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_new = tmp->add_f();
+  }
+}
+
+void mpi_cartdim_get_ (int* comm, int* ndims, int* ierr) {
+  *ierr = MPI_Cartdim_get(simgrid::smpi::Comm::f2c(*comm), ndims);
+}
+
+void mpi_graph_create_ (int* comm_old, int* nnodes, int* index, int* edges, int* reorder, int* comm_graph, int* ierr) {
+  MPI_Comm tmp;
+  *ierr = MPI_Graph_create(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_graph = tmp->add_f();
+  }
+}
+
+void mpi_graph_get_ (int* comm, int* maxindex, int* maxedges, int* index, int* edges, int* ierr) {
+  *ierr = MPI_Graph_get(simgrid::smpi::Comm::f2c(*comm), *maxindex, *maxedges, index, edges);
+}
+
+void mpi_graph_map_ (int* comm_old, int* nnodes, int* index, int* edges, int* newrank, int* ierr) {
+  *ierr = MPI_Graph_map(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, newrank);
+}
+
+void mpi_graph_neighbors_ (int* comm, int* rank, int* maxneighbors, int* neighbors, int* ierr) {
+  *ierr = MPI_Graph_neighbors(simgrid::smpi::Comm::f2c(*comm), *rank, *maxneighbors, neighbors);
+}
+
+void mpi_graph_neighbors_count_ (int* comm, int* rank, int* nneighbors, int* ierr) {
+  *ierr = MPI_Graph_neighbors_count(simgrid::smpi::Comm::f2c(*comm), *rank, nneighbors);
+}
+
+void mpi_graphdims_get_ (int* comm, int* nnodes, int* nedges, int* ierr) {
+  *ierr = MPI_Graphdims_get(simgrid::smpi::Comm::f2c(*comm), nnodes, nedges);
+}
+
+void mpi_topo_test_ (int* comm, int* top_type, int* ierr) {
+  *ierr = MPI_Topo_test(simgrid::smpi::Comm::f2c(*comm), top_type);
+}
+
+void mpi_error_class_ (int* errorcode, int* errorclass, int* ierr) {
+  *ierr = MPI_Error_class(*errorcode, errorclass);
+}
+
+void mpi_errhandler_create_ (void* function, void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_create(reinterpret_cast<MPI_Handler_function*>(function), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_errhandler_free_ (void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_free(static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_errhandler_get_ (int* comm, void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_get(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_errhandler_set_ (int* comm, void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_cancel_ (int* request, int* ierr) {
+  MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
+  *ierr = MPI_Cancel(&tmp);
+}
+
+void mpi_buffer_attach_ (void* buffer, int* size, int* ierr) {
+  *ierr = MPI_Buffer_attach(buffer, *size);
+}
+
+void mpi_buffer_detach_ (void* buffer, int* size, int* ierr) {
+  *ierr = MPI_Buffer_detach(buffer, size);
+}
+
+
+
+void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, int* remote_leader, int* tag,
+                            int* comm_out, int* ierr) {
+  MPI_Comm tmp;
+  *ierr = MPI_Intercomm_create(simgrid::smpi::Comm::f2c(*local_comm), *local_leader, simgrid::smpi::Comm::f2c(*peer_comm), *remote_leader,
+                               *tag, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_out = tmp->add_f();
+  }
+}
+
+void mpi_intercomm_merge_ (int* comm, int* high, int* comm_out, int* ierr) {
+  MPI_Comm tmp;
+  *ierr = MPI_Intercomm_merge(simgrid::smpi::Comm::f2c(*comm), *high, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_out = tmp->add_f();
+  }
+}
+
+void mpi_attr_delete_ (int* comm, int* keyval, int* ierr) {
+  *ierr = MPI_Attr_delete(simgrid::smpi::Comm::f2c(*comm), *keyval);
+}
+
+void mpi_attr_put_ (int* comm, int* keyval, void* attr_value, int* ierr) {
+  *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value);
+}
+void mpi_keyval_create_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr) {
+  *ierr = MPI_Keyval_create(reinterpret_cast<MPI_Copy_function*>(copy_fn),reinterpret_cast<MPI_Delete_function*>(delete_fn), keyval, extra_state);
+}
+
+void mpi_keyval_free_ (int* keyval, int* ierr) {
+  *ierr = MPI_Keyval_free(keyval);
+}
+
+void mpi_test_cancelled_ (MPI_Status* status, int* flag, int* ierr) {
+  *ierr = MPI_Test_cancelled(status, flag);
+}
+
+void mpi_get_elements_ (MPI_Status* status, int* datatype, int* elements, int* ierr) {
+  *ierr = MPI_Get_elements(status, simgrid::smpi::Datatype::f2c(*datatype), elements);
+}
+
+void mpi_dims_create_ (int* nnodes, int* ndims, int* dims, int* ierr) {
+  *ierr = MPI_Dims_create(*nnodes, *ndims, dims);
+}
+
+void mpi_add_error_class_ ( int *errorclass, int* ierr){
+  *ierr = MPI_Add_error_class( errorclass);
+}
+
+void mpi_add_error_code_ ( int* errorclass, int *errorcode, int* ierr){
+  *ierr = MPI_Add_error_code(*errorclass, errorcode);
+}
+
+void mpi_add_error_string_ ( int* errorcode, char *string, int* ierr){
+  *ierr = MPI_Add_error_string(*errorcode, string);
+}
+
+void mpi_info_dup_ (int* info, int* newinfo, int* ierr){
+  MPI_Info tmp;
+  *ierr = MPI_Info_dup(simgrid::smpi::Info::f2c(*info), &tmp);
+  if(*ierr==MPI_SUCCESS){
+    *newinfo= tmp->add_f();
+  }
+}
+
+void mpi_info_get_valuelen_ ( int* info, char *key, int *valuelen, int *flag, int* ierr, unsigned int keylen){
+  while(key[keylen-1]==' ')
+    keylen--;
+  while(*key==' '){//handle leading blanks
+    keylen--;
+    key++;
+  }
+  char* tkey = xbt_new(char, keylen+1);
+  strncpy(tkey, key, keylen);
+  tkey[keylen]='\0';
+  *ierr = MPI_Info_get_valuelen( simgrid::smpi::Info::f2c(*info), tkey, valuelen, flag);
+  xbt_free(tkey);
+}
+
+void mpi_info_delete_ (int* info, char *key, int* ierr, unsigned int keylen){
+  while(key[keylen-1]==' ')
+    keylen--;
+  while(*key==' '){//handle leading blanks
+    keylen--;
+    key++;
+  }
+  char* tkey = xbt_new(char, keylen+1);
+  strncpy(tkey, key, keylen);
+  tkey[keylen]='\0';
+  *ierr = MPI_Info_delete(simgrid::smpi::Info::f2c(*info), tkey);
+  xbt_free(tkey);
+}
+
+void mpi_info_get_nkeys_ ( int* info, int *nkeys, int* ierr){
+  *ierr = MPI_Info_get_nkeys( simgrid::smpi::Info::f2c(*info), nkeys);
+}
+
+void mpi_info_get_nthkey_ ( int* info, int* n, char *key, int* ierr, unsigned int keylen){
+  *ierr = MPI_Info_get_nthkey( simgrid::smpi::Info::f2c(*info), *n, key);
+  unsigned int i = 0;
+  for (i=strlen(key); i<keylen; i++)
+    key[i]=' ';
+}
+
+void mpi_grequest_start_ ( void *query_fn, void *free_fn, void *cancel_fn, void *extra_state, int* request, int* ierr){
+  MPI_Request tmp;
+  *ierr = MPI_Grequest_start( reinterpret_cast<MPI_Grequest_query_function*>(query_fn), reinterpret_cast<MPI_Grequest_free_function*>(free_fn),
+                              reinterpret_cast<MPI_Grequest_cancel_function*>(cancel_fn), extra_state, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_grequest_complete_ ( int* request, int* ierr){
+  *ierr = MPI_Grequest_complete( simgrid::smpi::Request::f2c(*request));
+}
+
+void mpi_status_set_cancelled_ (MPI_Status* status,int* flag, int* ierr){
+  *ierr = MPI_Status_set_cancelled(status,*flag);
+}
+
+void mpi_status_set_elements_ ( MPI_Status* status, int* datatype, int* count, int* ierr){
+  *ierr = MPI_Status_set_elements( status, simgrid::smpi::Datatype::f2c(*datatype), *count);
+}
+
+void mpi_publish_name_ ( char *service_name, int* info, char *port_name, int* ierr){
+  *ierr = MPI_Publish_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name);
+}
+
+void mpi_unpublish_name_ ( char *service_name, int* info, char *port_name, int* ierr){
+  *ierr = MPI_Unpublish_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name);
+}
+
+void mpi_lookup_name_ ( char *service_name, int* info, char *port_name, int* ierr){
+  *ierr = MPI_Lookup_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name);
+}
+void mpi_open_port_ ( int* info, char *port_name, int* ierr){
+  *ierr = MPI_Open_port( *reinterpret_cast<MPI_Info*>(info),port_name);
+}
+
+void mpi_close_port_ ( char *port_name, int* ierr){
+  *ierr = MPI_Close_port( port_name);
+}
+
+void mpi_file_close_ ( int* file, int* ierr){
+  *ierr= MPI_File_close(reinterpret_cast<MPI_File*>(*file));
+}
+
+void mpi_file_delete_ ( char* filename, int* info, int* ierr){
+  *ierr= MPI_File_delete(filename, simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_file_open_ ( int* comm, char* filename, int* amode, int* info, int* fh, int* ierr){
+  *ierr= MPI_File_open(simgrid::smpi::Comm::f2c(*comm), filename, *amode, simgrid::smpi::Info::f2c(*info), reinterpret_cast<MPI_File*>(*fh));
+}
+
+void mpi_file_set_view_ ( int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr){
+  *ierr= MPI_File_set_view(reinterpret_cast<MPI_File>(*fh) , reinterpret_cast<MPI_Offset>(*offset), simgrid::smpi::Datatype::f2c(*etype), simgrid::smpi::Datatype::f2c(*filetype), datarep, simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_file_read_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){
+  *ierr= MPI_File_read(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status);
+}
+
+void mpi_file_write_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){
+  *ierr= MPI_File_write(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status);
+}
+
+} // extern "C"
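
The blank-trimming loops repeated in mpi_win_set_name_, mpi_info_set_ and friends
exist because Fortran passes CHARACTER arguments as a pointer plus a hidden length,
padded with blanks rather than terminated by '\0'. A helper doing that conversion
once could look like this -- a sketch under that assumption, and fort_string_to_c
is a made-up name, not something from this diff:

  #include <string>

  static std::string fort_string_to_c(const char* s, int len)
  {
    int begin = 0;
    while (begin < len && s[begin] == ' ')    // skip leading blanks
      begin++;
    int end = len;
    while (end > begin && s[end - 1] == ' ')  // drop trailing padding
      end--;
    return std::string(s + begin, end - begin);  // NUL-terminated copy
  }
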
diff --git a/src/smpi/bindings/smpi_f77_coll.cpp b/src/smpi/bindings/smpi_f77_coll.cpp
new file mode 100644
index 0000000000..1d642da9ad
--- /dev/null
+++ b/src/smpi/bindings/smpi_f77_coll.cpp
@@ -0,0 +1,126 @@
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_op.hpp"
+#include "smpi_coll.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_barrier_(int* comm, int* ierr) {
+  *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
+  *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  sendbuf = static_cast<void*>( FORT_BOTTOM(sendbuf));
+  recvbuf = static_cast<void*>( FORT_BOTTOM(recvbuf));
+  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype),
+                             simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+                  int* root, int* comm, int* ierr) {
+  recvbuf = static_cast<void*>( FORT_IN_PLACE(recvbuf));
+  *ierr = MPI_Scatter(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                      recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype,
+                   void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) {
+  recvbuf = static_cast<void*>( FORT_IN_PLACE(recvbuf));
+  *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype),
+                       recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+                 int* root, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<void*>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
+  recvbuf = static_cast<void*>( FORT_BOTTOM(recvbuf));
+  *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                     recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype,
+                  void* recvbuf, int* recvcounts, int* displs, int* recvtype, int* root, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<void*>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
+  recvbuf = static_cast<void*>( FORT_BOTTOM(recvbuf));
+  *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                      recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
+                    int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                        recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype,
+                     void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) {
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                         recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
+  *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype),
+                   simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype,
+                   void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
+  *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+                       recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype,
+                    void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) {
+  *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype),
+                        recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* count, int* datatype, int* op, int* ierr){
+
+  *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op));
+}
+
+void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm,
+                                int* ierr)
+{
+  sendbuf = static_cast<void*>( FORT_IN_PLACE(sendbuf));
+  *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op),
+                                   simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts,
+                      int *rdispls, int* recvtypes, int* comm, int* ierr){
+  *ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast<MPI_Datatype*>(sendtypes), recvbuf, recvcnts, rdispls,
+                         reinterpret_cast<MPI_Datatype*>(recvtypes), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){
+  *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
+}
+
+}
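
FORT_IN_PLACE and FORT_BOTTOM above are assumed to map the Fortran sentinels
MPI_IN_PLACE and MPI_BOTTOM, which Fortran code passes as the address of a special
variable, onto their C counterparts; the real macros live in SMPI's private.h. A
self-contained toy model of that translation, with made-up sentinel values:

  #include <cstdio>

  static int fortran_in_place_marker;  // stand-in for the Fortran sentinel variable
  #define C_IN_PLACE ((void*)-1)       // stand-in for C's MPI_IN_PLACE

  static void* fort_in_place(void* buf)
  {
    // If the app passed the Fortran sentinel, substitute the C one.
    return buf == &fortran_in_place_marker ? C_IN_PLACE : buf;
  }

  int main()
  {
    int data = 42;
    std::printf("%p -> %p\n", (void*)&data, fort_in_place(&data));  // unchanged
    std::printf("%p -> %p\n", (void*)&fortran_in_place_marker,
                fort_in_place(&fortran_in_place_marker));           // becomes C_IN_PLACE
    return 0;
  }
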
diff --git a/src/smpi/bindings/smpi_f77_comm.cpp b/src/smpi/bindings/smpi_f77_comm.cpp
new file mode 100644
index 0000000000..f0f56d0d2a
--- /dev/null
+++ b/src/smpi/bindings/smpi_f77_comm.cpp
@@ -0,0 +1,240 @@
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_info.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_comm_rank_(int* comm, int* rank, int* ierr) {
+  *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank);
+}
+
+void mpi_comm_size_(int* comm, int* size, int* ierr) {
+  *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size);
+}
+
+void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) {
+  MPI_Comm tmp;
+
+  *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) {
+  MPI_Comm tmp;
+
+  *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_free_(int* comm, int* ierr) {
+  MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
+
+  *ierr = MPI_Comm_free(&tmp);
+
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::Comm::free_f(*comm);
+  }
+}
+
+void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) {
+  MPI_Comm tmp;
+
+  *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_out = tmp->add_f();
+  }
+}
+
+void mpi_comm_group_(int* comm, int* group_out, int* ierr) {
+  MPI_Group tmp;
+
+  *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *group_out = tmp->c2f();
+  }
+}
+
+void mpi_comm_create_group_ (int* comm, int* group, int i, int* comm_out, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_create_group(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), i, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_out = tmp->c2f();
+  }
+}
+
+void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){
+
+  *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, flag);
+}
+
+void mpi_comm_set_attr_ (int* comm, int* comm_keyval, void *attribute_val, int* ierr){
+
+  *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val);
+}
+
+void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){
+
+  *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval);
+}
+
+void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+
+  *ierr = MPI_Comm_create_keyval(reinterpret_cast<MPI_Comm_copy_attr_function*>(copy_fn), reinterpret_cast<MPI_Comm_delete_attr_function*>(delete_fn),
+                                 keyval, extra_state) ;
+}
+
+void mpi_comm_free_keyval_ (int* keyval, int* ierr) {
+  *ierr = MPI_Comm_free_keyval( keyval);
+}
+
+void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){
+  *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len);
+  if(*len>0)
+    name[*len]=' ';
+}
+
+void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){
+
+  *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result);
+}
+
+void mpi_comm_disconnect_ (int* comm, int* ierr){
+  MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
+  *ierr = MPI_Comm_disconnect(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::Comm::free_f(*comm);
+  }
+}
+void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) {
+  *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) {
+  *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag);
+}
+
+void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) {
+  MPI_Group tmp;
+  *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *group = tmp->c2f();
+  }
+}
+
+void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) {
+  *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size);
+}
+
+void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){
+  char* tname = xbt_new(char, size+1);
+  strncpy(tname, name, size);
+  tname[size]='\0';
+  *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname);
+  xbt_free(tname);
+}
+
+void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_set_info_ (int* comm, int* info, int* ierr){
+  *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info));
+}
+
+void mpi_comm_get_info_ (int* comm, int* info, int* ierr){
+  MPI_Info tmp;
+  *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr==MPI_SUCCESS){
+    *info = tmp->c2f();
+  }
+}
+
+void mpi_comm_create_errhandler_ ( void *function, void *errhandler, int* ierr){
+  *ierr = MPI_Comm_create_errhandler( reinterpret_cast<MPI_Comm_errhandler_function*>(function), static_cast<MPI_Errhandler*>(errhandler));
+}
+
+void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){
+  *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode);
+}
+
+void mpi_comm_connect_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_join_ ( int* fd, int* intercomm, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_join( *fd, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *intercomm = tmp->add_f();
+  }
+}
+
+
+void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm,
+                       int* array_of_errcodes, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp,
+                          array_of_errcodes);
+  if(*ierr == MPI_SUCCESS) {
+    *intercomm = tmp->add_f();
+  }
+}
+void mpi_comm_spawn_multiple_ (int* count, char *array_of_commands, char** array_of_argv, int* array_of_maxprocs,
+                               int* array_of_info, int* root,
+                               int* comm, int* intercomm, int* array_of_errcodes, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_spawn_multiple(*count, &array_of_commands, &array_of_argv, array_of_maxprocs,
+                                  reinterpret_cast<MPI_Info*>(array_of_info), *root,
+                                  simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes);
+  if(*ierr == MPI_SUCCESS) {
+    *intercomm = tmp->add_f();
+  }
+}
+
+void mpi_comm_get_parent_ (int* parent, int* ierr){
+  MPI_Comm tmp;
+  *ierr = MPI_Comm_get_parent(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *parent = tmp->c2f();
+  }
+}
+
+}
diff --git a/src/smpi/bindings/smpi_f77_request.cpp b/src/smpi/bindings/smpi_f77_request.cpp
new file mode 100644
index 0000000000..f2e77aed6f
--- /dev/null
+++ b/src/smpi/bindings/smpi_f77_request.cpp
@@ -0,0 +1,289 @@
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_request.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount,
+                   int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) {
+  sendbuf = static_cast<char*>(FORT_BOTTOM(sendbuf));
+  recvbuf = static_cast<char*>(FORT_BOTTOM(recvbuf));
+  *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount,
+                       simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
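The FORT_BOTTOM() calls above deal with MPI_BOTTOM: Fortran cannot pass the C constant directly, so it passes the address of a well-known marker instead, and the wrapper swaps in the real sentinel before calling the C API. A plausible shape for such a translation follows; the sentinel detection is an assumption for illustration, SMPI's actual macro lives in private.h.

    // Hypothetical sketch of MPI_BOTTOM translation for Fortran callers.
    static void* fortran_bottom_marker;        // address the Fortran side passes
    static void* const c_mpi_bottom = nullptr; // stand-in for the C-level MPI_BOTTOM

    static inline void* fort_bottom(void* addr) {
      return addr == fortran_bottom_marker ? c_mpi_bottom : addr;
    }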
+void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = req->add_f();
+  }
+}
+
+void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) {
+  buf = static_cast<char*>(FORT_BOTTOM(buf));
+  *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), status);
+}
+
+void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag,
+                            int* comm, MPI_Status* status, int* ierr)
+{
+  *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src,
+                               *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) {
+  *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request tmp;
+  *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) {
+  *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request tmp;
+  *ierr = MPI_Bsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request tmp;
+  *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request tmp;
+  *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request tmp;
+  *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *request = tmp->add_f();
+  }
+}
+
+void mpi_start_(int* request, int* ierr) {
+  MPI_Request req = simgrid::smpi::Request::f2c(*request);
+
+  *ierr = MPI_Start(&req);
+}
+void mpi_startall_(int* count, int* requests, int* ierr) {
+  MPI_Request* reqs = xbt_new(MPI_Request, *count);
+  for(int i = 0; i < *count; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Startall(*count, reqs);
+  xbt_free(reqs);
+}
+
+void mpi_wait_(int* request, MPI_Status* status, int* ierr) {
+  MPI_Request req = simgrid::smpi::Request::f2c(*request);
+
+  *ierr = MPI_Wait(&req, FORT_STATUS_IGNORE(status));
+  if(req==MPI_REQUEST_NULL){
+    simgrid::smpi::Request::free_f(*request);
+    *request=MPI_FORTRAN_REQUEST_NULL;
+  }
+}
+
+void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
+  MPI_Request* reqs = xbt_new(MPI_Request, *count);
+  for(int i = 0; i < *count; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Waitany(*count, reqs, index, status);
+  if(reqs[*index]==MPI_REQUEST_NULL){
+    simgrid::smpi::Request::free_f(requests[*index]);
+    requests[*index]=MPI_FORTRAN_REQUEST_NULL;
+  }
+  xbt_free(reqs);
+}
+
+void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) {
+  MPI_Request* reqs = xbt_new(MPI_Request, *count);
+  for(int i = 0; i < *count; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status));
+  for(int i = 0; i < *count; i++) {
+    if(reqs[i]==MPI_REQUEST_NULL){
+      simgrid::smpi::Request::free_f(requests[i]);
+      requests[i]=MPI_FORTRAN_REQUEST_NULL;
+    }
+  }
+
+  xbt_free(reqs);
+}
+
+void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MPI_Status* status, int* ierr)
+{
+  MPI_Request* reqs = xbt_new(MPI_Request, *incount);
+  for(int i = 0; i < *incount; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Waitsome(*incount, reqs, outcount, indices, status);
+  for(int i = 0; i < *outcount; i++){
+    if(reqs[indices[i]]==MPI_REQUEST_NULL){
+      simgrid::smpi::Request::free_f(requests[indices[i]]);
+      requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
+    }
+  }
+  xbt_free(reqs);
+}
+
+void mpi_test_ (int * request, int *flag, MPI_Status * status, int* ierr){
+  MPI_Request req = simgrid::smpi::Request::f2c(*request);
+  *ierr = MPI_Test(&req, flag, FORT_STATUS_IGNORE(status));
+  if(req==MPI_REQUEST_NULL){
+    simgrid::smpi::Request::free_f(*request);
+    *request=MPI_FORTRAN_REQUEST_NULL;
+  }
+}
+
+void mpi_testall_ (int* count, int * requests, int *flag, MPI_Status * statuses, int* ierr){
+  MPI_Request* reqs = xbt_new(MPI_Request, *count);
+  for(int i = 0; i < *count; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses));
+  for(int i = 0; i < *count; i++) {
+    if(reqs[i]==MPI_REQUEST_NULL){
+      simgrid::smpi::Request::free_f(requests[i]);
+      requests[i]=MPI_FORTRAN_REQUEST_NULL;
+    }
+  }
+  xbt_free(reqs);
+}
+
+void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr)
+{
+  MPI_Request* reqs = xbt_new(MPI_Request, *count);
+  for(int i = 0; i < *count; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+  }
+  *ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status));
+  if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){
+    simgrid::smpi::Request::free_f(requests[*index]);
+    requests[*index]=MPI_FORTRAN_REQUEST_NULL;
+  }
+  xbt_free(reqs);
+}
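The wait/test wrappers above all enforce the same invariant: whenever the C layer completes a request (resetting it to MPI_REQUEST_NULL), the Fortran-side table entry is released with Request::free_f() and the caller's integer handle is overwritten with MPI_FORTRAN_REQUEST_NULL so it cannot be reused by accident. Reduced to its core, with illustrative names in place of SMPI's actual request table:

    // Hypothetical sketch of the completion handling shared by the wrappers above.
    #include <unordered_map>

    struct Request;
    constexpr int FORTRAN_REQUEST_NULL = -1;           // stand-in for SMPI's constant
    static std::unordered_map<int, Request*> requests; // Fortran handle -> C request

    static void release_if_completed(int* f_handle, Request* req_after_call) {
      if (req_after_call == nullptr) {      // the C call completed the request
        requests.erase(*f_handle);          // free the Fortran-side entry ...
        *f_handle = FORTRAN_REQUEST_NULL;   // ... and poison the caller's handle
      }
    }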
+void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, MPI_Status* statuses, int* ierr) {
+  MPI_Request* reqs = xbt_new(MPI_Request, *incount);
+  for(int i = 0; i < *incount; i++) {
+    reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
+    indices[i] = 0;
+  }
+  *ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses));
+  for(int i = 0; i < *incount; i++){
+    if(indices[i] && reqs[indices[i]]==MPI_REQUEST_NULL){
+      simgrid::smpi::Request::free_f(requests[indices[i]]);
+      requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
+    }
+  }
+  xbt_free(reqs);
+}
+
+void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) {
+  *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+}
+
+void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) {
+  *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, status);
+}
+
+}
diff --git a/src/smpi/bindings/smpi_f77_type.cpp b/src/smpi/bindings/smpi_f77_type.cpp
new file mode 100644
index 0000000000..1da72486c8
--- /dev/null
+++ b/src/smpi/bindings/smpi_f77_type.cpp
@@ -0,0 +1,285 @@
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype.hpp"
+
+extern "C" { // This should really use the C linkage to be usable from Fortran
+
+void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){
+  *ierr = MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
+}
+
+void mpi_type_free_(int* datatype, int* ierr){
+  MPI_Datatype tmp = simgrid::smpi::Datatype::f2c(*datatype);
+  *ierr = MPI_Type_free(&tmp);
+  if(*ierr == MPI_SUCCESS) {
+    simgrid::smpi::F2C::free_f(*datatype);
+  }
+}
+
+void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){
+  *ierr = MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp);
+}
+
+void mpi_type_lb_(int* datatype, MPI_Aint * extent, int* ierr){
+  *ierr = MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
+}
+
+void mpi_type_size_(int* datatype, int *size, int* ierr)
+{
+  *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size);
+}
+
+void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newdatatype = tmp->add_f();
+  }
+}
+
+void mpi_type_set_name_ (int* datatype, char * name, int* ierr, int size){
+  char* tname = xbt_new(char, size+1);
+  strncpy(tname, name, size);
+  tname[size]='\0';
+  *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname);
+  xbt_free(tname);
+}
+
+void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){
+  *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype), name, len);
+  if(*len>0)
+    name[*len]=' ';
+}
+
+void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){
+  *ierr = MPI_Type_get_attr(simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val, flag);
+}
+
+void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){
+  *ierr = MPI_Type_set_attr(simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val);
+}
+
+void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){
+  *ierr = MPI_Type_delete_attr(simgrid::smpi::Datatype::f2c(*type), *type_keyval);
+}
+
+void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
+  *ierr = MPI_Type_create_keyval(reinterpret_cast<MPI_Type_copy_attr_function*>(copy_fn),
+                                 reinterpret_cast<MPI_Type_delete_attr_function*>(delete_fn), keyval, extra_state);
+}
+
+void mpi_type_free_keyval_ (int* keyval, int* ierr) {
+  *ierr = MPI_Type_free_keyval(keyval);
+}
+
+void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
+  *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
+}
+
+void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
+  *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
+}
+
+void mpi_type_commit_(int* datatype, int* ierr){
+  MPI_Datatype tmp = simgrid::smpi::Datatype::f2c(*datatype);
+  *ierr = MPI_Type_commit(&tmp);
+}
+
+void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) {
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) {
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype,
+                                      int* ierr) {
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) {
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_indexed_(int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int* newtype,
+                                     int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) {
+  MPI_Datatype tmp;
+  MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
+  for(int i = 0; i < *count; i++){
+    types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
+  }
+  *ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+  xbt_free(types);
+}
+
+void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
+  for(int i = 0; i < *count; i++){
+    types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
+  }
+  *ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+  xbt_free(types);
+}
+
+void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) {
+  *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm,
+                  int* ierr) {
+  *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm));
+}
+
+void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){
+  *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size);
+}
+
+void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount,
+                         MPI_Aint *position, int* ierr){
+  *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position);
+}
+
+void mpi_unpack_external_ (char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf,
+                           int* outcount, int* datatype, int* ierr){
+  *ierr = MPI_Unpack_external(datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype));
+}
+
+void mpi_type_get_envelope_ (int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner,
+                             int* ierr){
+  *ierr = MPI_Type_get_envelope(simgrid::smpi::Datatype::f2c(*datatype), num_integers,
+                                num_addresses, num_datatypes, combiner);
+}
+
+void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes,
+                             int* array_of_integers, MPI_Aint* array_of_addresses,
+                             int* array_of_datatypes, int* ierr){
+  *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses, *max_datatypes,
+                                array_of_integers, array_of_addresses, reinterpret_cast<MPI_Datatype*>(array_of_datatypes));
+}
+
+void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gsizes, int* array_of_distribs,
+                              int* array_of_dargs, int* array_of_psizes,
+                              int* order, int* oldtype, int* newtype, int* ierr) {
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes,
+                                 array_of_distribs, array_of_dargs, array_of_psizes,
+                                 *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_resized_ (int* oldtype, MPI_Aint* lb, MPI_Aint* extent, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype), *lb, *extent, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_create_subarray_ (int* ndims, int *array_of_sizes, int *array_of_subsizes, int *array_of_starts,
+                                int* order, int* oldtype, int* newtype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_create_subarray(*ndims, array_of_sizes, array_of_subsizes, array_of_starts, *order,
+                                   simgrid::smpi::Datatype::f2c(*oldtype), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newtype = tmp->add_f();
+  }
+}
+
+void mpi_type_match_size_ (int* typeclass, int* size, int* datatype, int* ierr){
+  MPI_Datatype tmp;
+  *ierr = MPI_Type_match_size(*typeclass, *size, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *datatype = tmp->c2f();
+  }
+}
+
+}
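The next hunk renames smpi_mpi.cpp into bindings/ and, along the way, drops the trailing semicolons after the WRAPPED_PMPI_CALL and UNIMPLEMENTED_WRAPPED_PMPI_CALL invocations. When such a macro already expands to a complete definition, the extra semicolon is an empty declaration at namespace scope, which pedantic compilers diagnose; that is presumably what this cleanup silences. A contrived illustration (the macro below is not SMPI's):

    // Illustrative only: a macro that expands to a full function definition.
    #define WRAP(name) int name() { return 0; }

    WRAP(fine)    // expands to: int fine() { return 0; }
    WRAP(noisy);  // stray ';' is an empty declaration, flagged by -Wpedantic-style checks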
diff --git a/src/smpi/smpi_mpi.cpp b/src/smpi/bindings/smpi_mpi.cpp
similarity index 95%
rename from src/smpi/smpi_mpi.cpp
rename to src/smpi/bindings/smpi_mpi.cpp
index dca1010b50..9c9bdb6796 100644
--- a/src/smpi/smpi_mpi.cpp
+++ b/src/smpi/bindings/smpi_mpi.cpp
@@ -72,7 +72,7 @@ WRAPPED_PMPI_CALL(int,MPI_Comm_size,(MPI_Comm comm, int *size),(comm, size))
 WRAPPED_PMPI_CALL(int,MPI_Comm_split,(MPI_Comm comm, int color, int key, MPI_Comm* comm_out),(comm, color, key, comm_out))
 WRAPPED_PMPI_CALL(int,MPI_Comm_create_group,(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm* comm_out),(comm, group, tag, comm_out))
 WRAPPED_PMPI_CALL(int,MPI_Compare_and_swap,(void *origin_addr, void *compare_addr,
-        void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win), (origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp, win));
+        void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win), (origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp, win))
 WRAPPED_PMPI_CALL(int,MPI_Dims_create,(int nnodes, int ndims, int* dims) ,(nnodes, ndims, dims))
 WRAPPED_PMPI_CALL(int,MPI_Error_class,(int errorcode, int* errorclass) ,(errorcode, errorclass))
 WRAPPED_PMPI_CALL(int,MPI_Exscan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
@@ -276,68 +276,68 @@ UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Errhandler_free,(MPI_Errhandler* errhand
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Errhandler_get,(MPI_Comm comm, MPI_Errhandler* errhandler) ,(comm, errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Errhandler_set,(MPI_Comm comm, MPI_Errhandler errhandler) ,(comm, errhandler))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Error_string,(int errorcode, char* string, int* resultlen) ,(errorcode, string, resultlen))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Register_datarep, (char *datarep, MPI_Datarep_conversion_function *read_conversion_fn, MPI_Datarep_conversion_function *write_conversion_fn, MPI_Datarep_extent_function *dtype_file_extent_fn, void *extra_state) ,(datarep, read_conversion_fn, write_conversion_fn, dtype_file_extent_fn, extra_state));
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_Fint, MPI_File_c2f,(MPI_File file), (file));
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_File, MPI_File_f2c,(MPI_Fint file), (file));
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_call_errhandler,(MPI_File fh, int errorcode), (fh, errorcode));
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, 
MPI_File_create_errhandler,(MPI_File_errhandler_function *function, MPI_Errhandler *errhandler),(function, errhandler)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_errhandler,( MPI_File file, MPI_Errhandler errhandler), (file, errhandler)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_errhandler,( MPI_File file, MPI_Errhandler *errhandler), (file, errhandler)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_open,(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh),(comm, filename, amode, info, fh)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_close,(MPI_File *fh), (fh)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_delete,(char *filename, MPI_Info info), (filename, info)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_size,(MPI_File fh, MPI_Offset size), (fh, size)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_preallocate,(MPI_File fh, MPI_Offset size), (fh, size)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_size,(MPI_File fh, MPI_Offset *size), (fh, size)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_group,(MPI_File fh, MPI_Group *group), (fh, group)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_amode,(MPI_File fh, int *amode), (fh, amode)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_info,(MPI_File fh, MPI_Info info), (fh, info)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_info,(MPI_File fh, MPI_Info *info_used), (fh, info_used)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_view,(MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info), (fh, disp, etype, filetype, datarep, info)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_view,(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep), (fh, disp, etype, filetype, datarep)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read,(MPI_File fh, void *buf, int 
count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_seek,(MPI_File fh, MPI_Offset offset, int whenace), (fh, offset, whenace)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position,(MPI_File fh, MPI_Offset *offset), (fh, offset)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_byte_offset,(MPI_File fh, MPI_Offset offset, MPI_Offset *disp), (fh, offset, disp)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_shared,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_seek_shared,(MPI_File fh, MPI_Offset offset, int whence), (fh, offset, whence)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position_shared,(MPI_File fh, MPI_Offset *offset), (fh, offset)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all_end,(MPI_File 
fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_type_extent,(MPI_File fh, MPI_Datatype datatype, MPI_Aint *extent), (fh, datatype, extent)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_atomicity,(MPI_File fh, int flag), (fh, flag)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_atomicity,(MPI_File fh, int *flag), (fh, flag)); -UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_sync,(MPI_File fh), (fh)); +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Register_datarep, (char *datarep, MPI_Datarep_conversion_function *read_conversion_fn, MPI_Datarep_conversion_function *write_conversion_fn, MPI_Datarep_extent_function *dtype_file_extent_fn, void *extra_state) ,(datarep, read_conversion_fn, write_conversion_fn, dtype_file_extent_fn, extra_state)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_Fint, MPI_File_c2f,(MPI_File file), (file)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(MPI_File, MPI_File_f2c,(MPI_Fint file), (file)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_call_errhandler,(MPI_File fh, int errorcode), (fh, errorcode)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_create_errhandler,(MPI_File_errhandler_function *function, MPI_Errhandler *errhandler),(function, errhandler)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_errhandler,( MPI_File file, MPI_Errhandler errhandler), (file, errhandler)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_errhandler,( MPI_File file, MPI_Errhandler *errhandler), (file, errhandler)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_open,(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh),(comm, filename, amode, info, fh)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_close,(MPI_File *fh), (fh)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_delete,(char *filename, MPI_Info info), (filename, info)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_size,(MPI_File fh, MPI_Offset size), (fh, size)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_preallocate,(MPI_File fh, MPI_Offset size), (fh, size)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_size,(MPI_File fh, MPI_Offset *size), (fh, size)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_group,(MPI_File fh, MPI_Group *group), (fh, group)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_amode,(MPI_File fh, int *amode), (fh, amode)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_info,(MPI_File fh, MPI_Info info), 
(fh, info)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_info,(MPI_File fh, MPI_Info *info_used), (fh, info_used)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_view,(MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info), (fh, disp, etype, filetype, datarep, info)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_view,(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep), (fh, disp, etype, filetype, datarep)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, 
MPI_File_seek,(MPI_File fh, MPI_Offset offset, int whenace), (fh, offset, whenace)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position,(MPI_File fh, MPI_Offset *offset), (fh, offset)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_byte_offset,(MPI_File fh, MPI_Offset offset, MPI_Offset *disp), (fh, offset, disp)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_shared,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_seek_shared,(MPI_File fh, MPI_Offset offset, int whence), (fh, offset, whence)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position_shared,(MPI_File fh, MPI_Offset *offset), (fh, offset)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered_begin,(MPI_File fh, void *buf, int count, MPI_Datatype datatype), (fh, buf, count, datatype)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_type_extent,(MPI_File fh, MPI_Datatype datatype, MPI_Aint *extent), (fh, datatype, extent)) +UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, 
MPI_File_set_atomicity,(MPI_File fh, int flag), (fh, flag))
+UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_atomicity,(MPI_File fh, int *flag), (fh, flag))
+UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_sync,(MPI_File fh), (fh))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Get_elements,(MPI_Status* status, MPI_Datatype datatype, int* elements) ,(status, datatype, elements))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_create,(MPI_Comm comm_old, int nnodes, int* index, int* edges, int reorder, MPI_Comm* comm_graph) ,(comm_old, nnodes, index, edges, reorder, comm_graph))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graphdims_get,(MPI_Comm comm, int* nnodes, int* nedges) ,(comm, nnodes, nedges))
diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp
new file mode 100644
index 0000000000..b939d4c958
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi.cpp
@@ -0,0 +1,218 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_process.hpp"
+#include "smpi_status.hpp"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)");
+
+//this function need to be here because of the calls to smpi_bench
+void TRACE_smpi_set_category(const char *category)
+{
+  //need to end bench otherwise categories for execution tasks are wrong
+  smpi_bench_end();
+  TRACE_internal_smpi_set_category (category);
+  //begin bench after changing process's category
+  smpi_bench_begin();
+}
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Init(int *argc, char ***argv)
+{
+  xbt_assert(simgrid::s4u::Engine::isInitialized(),
+             "Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
+  // PMPI_Init is called only once per SMPI process
+  int already_init;
+  MPI_Initialized(&already_init);
+  if(already_init == 0){
+    simgrid::smpi::Process::init(argc, argv);
+    smpi_process()->mark_as_initialized();
+    int rank = smpi_process()->index();
+    TRACE_smpi_init(rank);
+    TRACE_smpi_computing_init(rank);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+    extra->type = TRACING_INIT;
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+    smpi_bench_begin();
+  }
+
+  smpi_mpi_init();
+
+  return MPI_SUCCESS;
+}
+
+int PMPI_Finalize()
+{
+  smpi_bench_end();
+  int rank = smpi_process()->index();
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_FINALIZE;
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+  smpi_process()->finalize();
+
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  TRACE_smpi_finalize(smpi_process()->index());
+  return MPI_SUCCESS;
+}
+
+int PMPI_Finalized(int* flag)
+{
+  *flag = smpi_process() != nullptr ? smpi_process()->finalized() : 0;
+  return MPI_SUCCESS;
+}
+
+int PMPI_Get_version (int *version, int *subversion){
+  *version = MPI_VERSION;
+  *subversion = MPI_SUBVERSION;
+  return MPI_SUCCESS;
+}
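Every PMPI entry point in this new file is bracketed by smpi_bench_end() on entry and smpi_bench_begin() on exit: SMPI times the real user code that runs between MPI calls and injects it into the simulation as computation, so the clock has to stop while the simulator handles the call itself (the comment on TRACE_smpi_set_category above hints at the same constraint). Schematically, under illustrative names:

    // Sketch of the bench bracketing contract of the PMPI wrappers (stand-in types).
    struct Stopwatch { void stop() {} void start() {} };
    static Stopwatch user_code_clock;   // times application code between MPI calls

    int PMPI_Example_sketch() {
      user_code_clock.stop();           // smpi_bench_end(): entering MPI
      // ... argument checks and simulated communication ...
      user_code_clock.start();          // smpi_bench_begin(): back to user code
      return 0;                         // MPI_SUCCESS
    }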
+int PMPI_Get_library_version (char *version, int *len){
+  smpi_bench_end();
+  snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The Simgrid Team 2007-2017",
+           SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
+  *len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
+  smpi_bench_begin();
+  return MPI_SUCCESS;
+}
+
+int PMPI_Init_thread(int *argc, char ***argv, int required, int *provided)
+{
+  if (provided != nullptr) {
+    *provided = MPI_THREAD_SINGLE;
+  }
+  return MPI_Init(argc, argv);
+}
+
+int PMPI_Query_thread(int *provided)
+{
+  if (provided == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *provided = MPI_THREAD_SINGLE;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Is_thread_main(int *flag)
+{
+  if (flag == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *flag = smpi_process()->index() == 0;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Abort(MPI_Comm comm, int errorcode)
+{
+  smpi_bench_end();
+  // FIXME: should kill all processes in comm instead
+  simcall_process_kill(SIMIX_process_self());
+  return MPI_SUCCESS;
+}
+
+double PMPI_Wtime()
+{
+  return smpi_mpi_wtime();
+}
+
+extern double sg_maxmin_precision;
+double PMPI_Wtick()
+{
+  return sg_maxmin_precision;
+}
+
+int PMPI_Address(void *location, MPI_Aint * address)
+{
+  if (address==nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *address = reinterpret_cast<MPI_Aint>(location);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Get_address(void *location, MPI_Aint * address)
+{
+  return PMPI_Address(location, address);
+}
+
+int PMPI_Get_processor_name(char *name, int *resultlen)
+{
+  strncpy(name, sg_host_self()->getCname(), strlen(sg_host_self()->getCname()) < MPI_MAX_PROCESSOR_NAME - 1
+                                                ? strlen(sg_host_self()->getCname()) + 1
+                                                : MPI_MAX_PROCESSOR_NAME - 1);
+  *resultlen = strlen(name) > MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
+
+  return MPI_SUCCESS;
+}
+
+int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
+{
+  if (status == nullptr || count == nullptr) {
+    return MPI_ERR_ARG;
+  } else if (not datatype->is_valid()) {
+    return MPI_ERR_TYPE;
+  } else {
+    size_t size = datatype->size();
+    if (size == 0) {
+      *count = 0;
+      return MPI_SUCCESS;
+    } else if (status->count % size != 0) {
+      return MPI_UNDEFINED;
+    } else {
+      *count = simgrid::smpi::Status::get_count(status, datatype);
+      return MPI_SUCCESS;
+    }
+  }
+}
+
+int PMPI_Initialized(int* flag) {
+  *flag = (smpi_process() != nullptr && smpi_process()->initialized());
+  return MPI_SUCCESS;
+}
+
+int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
+  void *ptr = xbt_malloc(size);
+  if(ptr==nullptr)
+    return MPI_ERR_NO_MEM;
+  else {
+    *static_cast<void**>(baseptr) = ptr;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Free_mem(void *baseptr){
+  xbt_free(baseptr);
+  return MPI_SUCCESS;
+}
+
+int PMPI_Error_class(int errorcode, int* errorclass) {
+  // assume smpi uses only standard mpi error codes
+  *errorclass = errorcode;
+  return MPI_SUCCESS;
+}
+
+int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
+  smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
+  smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
+  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
+}
+
+int PMPI_Keyval_free(int* keyval) {
+  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
+}
+
+} // extern "C"
diff --git a/src/smpi/bindings/smpi_pmpi_coll.cpp b/src/smpi/bindings/smpi_pmpi_coll.cpp
new file mode 100644
index 0000000000..7459567693
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_coll.cpp
@@ -0,0 +1,787 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_BCAST;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+    if (comm->size() > 1)
+      simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm);
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Barrier(MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_BARRIER;
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::barrier(comm);
+
+    //Barrier can be used to synchronize RMA calls. Finish all requests from comm before.
+    comm->finish_rma_calls();
+
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
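The collectives that follow share one template: validate arguments and return the matching MPI_ERR_* code, allocate a fresh instr_extra_data record describing the operation (type, root, encoded datatypes, payload sizes; ownership passes to the tracing layer), bracket the simulated collective between TRACE_smpi_collective_in() and TRACE_smpi_collective_out(), and finally restart benchmarking. Condensed, with stand-in types where the real tracing structures are not shown in this diff:

    // Distilled control flow of the collective wrappers (stand-in types and names).
    struct Extra { int type; int root; int send_size; }; // instr_extra_data stand-in

    int collective_sketch(bool comm_ok, bool type_ok) {
      // smpi_bench_end();
      if (!comm_ok) return -2;          // MPI_ERR_COMM in the real wrappers
      if (!type_ok) return -3;          // MPI_ERR_TYPE / MPI_ERR_ARG
      Extra* extra = new Extra{};       // xbt_new0; freed by the tracing layer
      // TRACE_smpi_collective_in(rank, root, __FUNCTION__, extra);
      // simgrid::smpi::Colls::<operation>(...);
      // TRACE_smpi_collective_out(rank, root, __FUNCTION__);
      // smpi_bench_begin();
      delete extra;                     // only because this sketch has no tracer
      return 0;                         // MPI_SUCCESS
    }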
+int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
+                int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
+             ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
+    retval = MPI_ERR_TYPE;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendcount < 0)) || ((comm->rank() == root) && (recvcount < 0))){
+    retval = MPI_ERR_COUNT;
+  } else {
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    if((comm->rank() == root) && (sendbuf == MPI_IN_PLACE)) {
+      sendtmpcount = 0;
+      sendtmptype = recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_GATHER;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtmptype->size();
+    extra->send_size = sendtmpcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if ((comm->rank() == root) && known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
+                 MPI_Datatype recvtype, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
+             ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
+    retval = MPI_ERR_TYPE;
+  } else if ((sendbuf != MPI_IN_PLACE) && (sendcount < 0)){
+    retval = MPI_ERR_COUNT;
+  } else if (recvcounts == nullptr || displs == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    if((comm->rank() == root) && (sendbuf == MPI_IN_PLACE)) {
+      sendtmpcount = 0;
+      sendtmptype = recvtype;
+    }
+
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_GATHERV;
+    extra->num_processes = size;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    extra->send_size = sendtmpcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    if (comm->rank() == root) {
+      extra->recvcounts = xbt_new(int, size);
+      for (int i = 0; i < size; i++) // copy data to avoid bad free
+        extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+    }
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                   void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)){
+    retval = MPI_ERR_TYPE;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendcount < 0)) || (recvcount < 0)){
+    retval = MPI_ERR_COUNT;
+  } else {
+    if(sendbuf == MPI_IN_PLACE) {
+      sendbuf = static_cast<char*>(recvbuf) + recvtype->get_extent() * recvcount * comm->rank();
+      sendcount = recvcount;
+      sendtype = recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLGATHER;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    extra->send_size = sendcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+                    void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) {
+    retval = MPI_ERR_TYPE;
+  } else if ((sendbuf != MPI_IN_PLACE) && (sendcount < 0)){
+    retval = MPI_ERR_COUNT;
+  } else if (recvcounts == nullptr || displs == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    if(sendbuf == MPI_IN_PLACE) {
+      sendbuf = static_cast<char*>(recvbuf) + recvtype->get_extent() * displs[comm->rank()];
+      sendcount = recvcounts[comm->rank()];
+      sendtype = recvtype;
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLGATHERV;
+    extra->num_processes = size;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    extra->send_size = sendcount * dt_size_send;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recvcounts = xbt_new(int, size);
+    for (int i = 0; i < size; i++) // copy data to avoid bad free
+      extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
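Note how MPI_IN_PLACE is resolved above: Allgather points sendbuf at the caller's own slice of recvbuf (rank * recvcount * extent), while Allgatherv uses displs[rank] and recvcounts[rank] instead, so the underlying algorithms never see the sentinel. The offset arithmetic, isolated for clarity:

    // The IN_PLACE slice computation used by PMPI_Allgather above (illustrative).
    #include <cstddef>

    char* in_place_sendbuf(char* recvbuf, std::ptrdiff_t extent, int recvcount, int rank) {
      return recvbuf + extent * recvcount * rank;  // this rank's slot in recvbuf
    }
    // Allgatherv variant: recvbuf + extent * displs[rank], with count = recvcounts[rank].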
smpi_process()->index() : -1; + int i = 0; + int size = comm->size(); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); + extra->type = TRACING_ALLGATHERV; + extra->num_processes = size; + int known = 0; + extra->datatype1 = encode_datatype(sendtype, &known); + int dt_size_send = 1; + if (known == 0) + dt_size_send = sendtype->size(); + extra->send_size = sendcount * dt_size_send; + extra->datatype2 = encode_datatype(recvtype, &known); + int dt_size_recv = 1; + if (known == 0) + dt_size_recv = recvtype->size(); + extra->recvcounts = xbt_new(int, size); + for (i = 0; i < size; i++) // copy data to avoid bad free + extra->recvcounts[i] = recvcounts[i] * dt_size_recv; + + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); + + simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); + retval = MPI_SUCCESS; + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) +{ + int retval = 0; + + smpi_bench_end(); + + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (((comm->rank() == root) && (not sendtype->is_valid())) || + ((recvbuf != MPI_IN_PLACE) && (not recvtype->is_valid()))) { + retval = MPI_ERR_TYPE; + } else if ((sendbuf == recvbuf) || + ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){ + retval = MPI_ERR_BUFFER; + }else { + + if (recvbuf == MPI_IN_PLACE) { + recvtype = sendtype; + recvcount = sendcount; + } + int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; + int root_traced = comm->group()->index(root); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); + extra->type = TRACING_SCATTER; + extra->root = root_traced; + int known = 0; + extra->datatype1 = encode_datatype(sendtype, &known); + int dt_size_send = 1; + if ((comm->rank() == root) && known == 0) + dt_size_send = sendtype->size(); + extra->send_size = sendcount * dt_size_send; + extra->datatype2 = encode_datatype(recvtype, &known); + int dt_size_recv = 1; + if (known == 0) + dt_size_recv = recvtype->size(); + extra->recv_size = recvcount * dt_size_recv; + TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); + + simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); + retval = MPI_SUCCESS; + TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, + MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) +{ + int retval = 0; + + smpi_bench_end(); + + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (sendcounts == nullptr || displs == nullptr) { + retval = MPI_ERR_ARG; + } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) || + ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) { + retval = MPI_ERR_TYPE; + } else { + if (recvbuf == MPI_IN_PLACE) { + recvtype = sendtype; + recvcount = sendcounts[comm->rank()]; + } + int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_SCATTERV;
+    extra->num_processes = size;
+    extra->root = root_traced;
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = sendtype->size();
+    if (comm->rank() == root) {
+      extra->sendcounts = xbt_new(int, size);
+      for (int i = 0; i < size; i++) // copy data to avoid bad free
+        extra->sendcounts[i] = sendcounts[i] * dt_size_send;
+    }
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = 1;
+    if (known == 0)
+      dt_size_recv = recvtype->size();
+    extra->recv_size = recvcount * dt_size_recv;
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
+
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid() || op == MPI_OP_NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int root_traced = comm->group()->index(root);
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    extra->root = root_traced;
+
+    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){
+  int retval = 0;
+
+  smpi_bench_end();
+  if (not datatype->is_valid() || op == MPI_OP_NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+    op->apply(inbuf, inoutbuf, &count, datatype);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
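PMPI_Reduce_local, just above, is the one entry point in this group that performs no communication at all: it applies the reduction operator elementwise to two local buffers via op->apply(). A usage sketch with illustrative values:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int in[3]    = {1, 2, 3};
      int inout[3] = {10, 20, 30};
      /* purely local: inout[i] becomes in[i] + inout[i], no messages involved */
      MPI_Reduce_local(in, inout, 3, MPI_INT, MPI_SUM);
      printf("%d %d %d\n", inout[0], inout[1], inout[2]); /* prints 11 22 33 */
      MPI_Finalize();
      return 0;
    }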
+
+int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+
+    char* sendtmpbuf = static_cast<char*>(sendbuf);
+    if( sendbuf == MPI_IN_PLACE ) {
+      sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
+      simgrid::smpi::Datatype::copy(recvbuf, count, datatype, sendtmpbuf, count, datatype);
+    }
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLREDUCE;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
+
+    if( sendbuf == MPI_IN_PLACE )
+      xbt_free(sendtmpbuf);
+
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_SCAN;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm){
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_EXSCAN;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = count * dt_size_send;
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(count * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, count * datatype->size());
+    }
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
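As the Exscan binding above shows, MPI_IN_PLACE is emulated by copying recvbuf into a freshly allocated send buffer before calling Colls::exscan, so that the prefix results may safely overwrite recvbuf. A user-side sketch of the in-place call (recall that an exclusive scan leaves recvbuf on rank 0 undefined):

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      int acc = rank + 1;  /* the contribution of this rank */
      /* in place: the contribution is read from acc, and the sum
         over ranks 0..rank-1 is written back into acc */
      MPI_Exscan(MPI_IN_PLACE, &acc, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      MPI_Finalize();
      return 0;
    }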
+
+int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else if (recvcounts == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int i = 0;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE_SCATTER;
+    extra->num_processes = size;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = 0;
+    extra->recvcounts = xbt_new(int, size);
+    int totalcount = 0;
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->recvcounts[i] = recvcounts[i] * dt_size_send;
+      totalcount += recvcounts[i];
+    }
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(totalcount * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, totalcount * datatype->size());
+    }
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+    retval = MPI_SUCCESS;
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
+                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  int retval;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else if (recvcount < 0) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int count = comm->size();
+
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_REDUCE_SCATTER;
+    extra->num_processes = count;
+    int known = 0;
+    extra->datatype1 = encode_datatype(datatype, &known);
+    int dt_size_send = 1;
+    if (known == 0)
+      dt_size_send = datatype->size();
+    extra->send_size = 0;
+    extra->recvcounts = xbt_new(int, count);
+    for (int i = 0; i < count; i++) // copy data to avoid bad free
+      extra->recvcounts[i] = recvcount * dt_size_send;
+    void* sendtmpbuf = sendbuf;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * count * datatype->size()));
+      memcpy(sendtmpbuf, recvbuf, recvcount * count * datatype->size());
+    }
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    int* recvcounts = static_cast<int*>(xbt_malloc(count * sizeof(int)));
+    for (int i = 0; i < count; i++)
+      recvcounts[i] = recvcount;
+    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+    xbt_free(recvcounts);
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
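Note that PMPI_Reduce_scatter_block has no dedicated implementation here: the binding expands the uniform recvcount into a temporary recvcounts array and reuses Colls::reduce_scatter. The two user-level calls in the sketch below are therefore equivalent in SMPI; the fixed array sizes assume at most 32 ranks and are purely illustrative:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      int send[64] = {0}, recv[2];     /* assumes size <= 32, two ints kept per rank */
      MPI_Reduce_scatter_block(send, recv, 2, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

      int counts[32];                  /* the expansion done internally by the binding */
      for (int i = 0; i < size; i++)
        counts[i] = 2;
      MPI_Reduce_scatter(send, recv, counts, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      MPI_Finalize();
      return 0;
    }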
+
+int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
+                  MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLTOALL;
+
+    void* sendtmpbuf = static_cast<char*>(sendbuf);
+    int sendtmpcount = sendcount;
+    MPI_Datatype sendtmptype = sendtype;
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * comm->size() * recvtype->size()));
+      memcpy(sendtmpbuf, recvbuf, recvcount * comm->size() * recvtype->size());
+      sendtmpcount = recvcount;
+      sendtmptype = recvtype;
+    }
+
+    int known = 0;
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    if (known == 0)
+      extra->send_size = sendtmpcount * sendtmptype->size();
+    else
+      extra->send_size = sendtmpcount;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    if (known == 0)
+      extra->recv_size = recvcount * recvtype->size();
+    else
+      extra->recv_size = recvcount;
+
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+
+    retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
+
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE)
+      xbt_free(sendtmpbuf);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
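For MPI_IN_PLACE, PMPI_Alltoall must snapshot the entire receive buffer (recvcount times the communicator size), since an in-place all-to-all would otherwise overwrite blocks that have not been sent yet. An in-place usage sketch; the fixed buffer size assumes at most 64 ranks:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      int buf[64];                       /* one int per peer, assumes size <= 64 */
      for (int i = 0; i < size; i++)
        buf[i] = i;
      /* sendbuf == MPI_IN_PLACE: buf is both the source and the destination */
      MPI_Alltoall(MPI_IN_PLACE, 1, MPI_INT, buf, 1, MPI_INT, MPI_COMM_WORLD);
      MPI_Finalize();
      return 0;
    }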
+
+int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype sendtype, void* recvbuf,
+                   int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
+             recvdisps == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int i = 0;
+    int size = comm->size();
+    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
+    extra->type = TRACING_ALLTOALLV;
+    extra->send_size = 0;
+    extra->recv_size = 0;
+    extra->recvcounts = xbt_new(int, size);
+    extra->sendcounts = xbt_new(int, size);
+    int known = 0;
+    extra->datatype2 = encode_datatype(recvtype, &known);
+    int dt_size_recv = recvtype->size();
+
+    void* sendtmpbuf = static_cast<char*>(sendbuf);
+    int* sendtmpcounts = sendcounts;
+    int* sendtmpdisps = senddisps;
+    MPI_Datatype sendtmptype = sendtype;
+    int maxsize = 0;
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->recv_size += recvcounts[i] * dt_size_recv;
+      extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
+      if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize)
+        maxsize = (recvdisps[i] + recvcounts[i]) * dt_size_recv;
+    }
+
+    if (sendbuf == MPI_IN_PLACE) {
+      sendtmpbuf = static_cast<void*>(xbt_malloc(maxsize));
+      memcpy(sendtmpbuf, recvbuf, maxsize);
+      sendtmpcounts = static_cast<int*>(xbt_malloc(size * sizeof(int)));
+      memcpy(sendtmpcounts, recvcounts, size * sizeof(int));
+      sendtmpdisps = static_cast<int*>(xbt_malloc(size * sizeof(int)));
+      memcpy(sendtmpdisps, recvdisps, size * sizeof(int));
+      sendtmptype = recvtype;
+    }
+
+    extra->datatype1 = encode_datatype(sendtmptype, &known);
+    int dt_size_send = sendtmptype->size();
+
+    for (i = 0; i < size; i++) { // copy data to avoid bad free
+      extra->send_size += sendtmpcounts[i] * dt_size_send;
+      extra->sendcounts[i] = sendtmpcounts[i] * dt_size_send;
+    }
+    extra->num_processes = size;
+    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+    retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
+                                             recvdisps, recvtype, comm);
+    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+
+    if (sendbuf == MPI_IN_PLACE) {
+      xbt_free(sendtmpbuf);
+      xbt_free(sendtmpcounts);
+      xbt_free(sendtmpdisps);
+    }
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+}
diff --git a/src/smpi/bindings/smpi_pmpi_comm.cpp b/src/smpi/bindings/smpi_pmpi_comm.cpp
new file mode 100644
index 0000000000..f658bfe078
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_comm.cpp
@@ -0,0 +1,269 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <climits>
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_process.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Comm_rank(MPI_Comm comm, int *rank)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (rank == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *rank = comm->rank();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_size(MPI_Comm comm, int *size)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (size == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *size = comm->size();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_get_name (MPI_Comm comm, char* name, int* len)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (name == nullptr || len == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    comm->get_name(name, len);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_group(MPI_Comm comm, MPI_Group * group)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (group == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *group = comm->group();
+    if (*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY)
+      (*group)->ref();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result)
+{
+  if (comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (result == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    if (comm1 == comm2) {       /* Same communicators means same groups */
+      *result = MPI_IDENT;
+    } else {
+      *result = comm1->group()->compare(comm2->group());
+      if (*result == MPI_IDENT) {
+        *result = MPI_CONGRUENT;
+      }
+    }
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm * newcomm)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (newcomm == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    return comm->dup(newcomm);
+  }
+}
+
+int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm)
+{
+  if (comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else if (group == MPI_GROUP_NULL) {
+    return MPI_ERR_GROUP;
+  } else if (newcomm == nullptr) {
+    return MPI_ERR_ARG;
+  } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
+    *newcomm= MPI_COMM_NULL;
+    return MPI_SUCCESS;
+  }else{
+    group->ref();
+    *newcomm = new simgrid::smpi::Comm(group, nullptr);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_free(MPI_Comm * comm)
+{
+  if (comm == nullptr) {
+    return MPI_ERR_ARG;
+  } else if (*comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else {
+    simgrid::smpi::Comm::destroy(*comm);
+    *comm = MPI_COMM_NULL;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_disconnect(MPI_Comm * comm)
+{
+  /* TODO: wait until all communication in comm are done */
+  if (comm == nullptr) {
+    return MPI_ERR_ARG;
+  } else if (*comm == MPI_COMM_NULL) {
+    return MPI_ERR_COMM;
+  } else {
+    simgrid::smpi::Comm::destroy(*comm);
+    *comm = MPI_COMM_NULL;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm_out == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else {
+    *comm_out = comm->split(color, key);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+
+  return retval;
+}
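All communicator entry points in this file follow the same shape: validate the handle and the output pointer, delegate to the corresponding simgrid::smpi::Comm member function, and translate the result into an MPI error code. A split sketch that groups even and odd ranks; the color and key choices are illustrative:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm half;
      /* color selects the subcommunicator, key (here the old rank) orders ranks within it */
      MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &half);
      MPI_Comm_free(&half);   /* resets the handle to MPI_COMM_NULL, as in the binding above */
      MPI_Finalize();
      return 0;
    }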
+
+int PMPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int, MPI_Comm* comm_out)
+{
+  int retval = 0;
+  smpi_bench_end();
+
+  if (comm_out == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else {
+    retval = MPI_Comm_create(comm, group, comm_out);
+  }
+  smpi_bench_begin();
+
+  return retval;
+}
+
+MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
+  return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
+}
+
+MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
+  return comm->c2f();
+}
+
+int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
+{
+  return PMPI_Attr_get(comm, comm_keyval, attribute_val, flag);
+}
+
+int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
+{
+  return PMPI_Attr_put(comm, comm_keyval, attribute_val);
+}
+
+int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
+{
+  return PMPI_Attr_delete(comm, comm_keyval);
+}
+
+int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
+                            void* extra_state)
+{
+  return PMPI_Keyval_create(copy_fn, delete_fn, keyval, extra_state);
+}
+
+int PMPI_Comm_free_keyval(int* keyval) {
+  return PMPI_Keyval_free(keyval);
+}
+
+int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
+  if(keyval == MPI_TAG_UB||keyval == MPI_HOST||keyval == MPI_IO ||keyval == MPI_WTIME_IS_GLOBAL||keyval == MPI_APPNUM
+     ||keyval == MPI_UNIVERSE_SIZE||keyval == MPI_LASTUSEDCODE)
+    return MPI_ERR_ARG;
+  else if (comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+  else
+    return comm->attr_delete(keyval);
+}
+
+int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
+  static int one = 1;
+  static int zero = 0;
+  static int tag_ub = INT_MAX;
+  static int last_used_code = MPI_ERR_LASTCODE;
+
+  if (comm==MPI_COMM_NULL){
+    *flag = 0;
+    return MPI_ERR_COMM;
+  }
+
+  switch (keyval) {
+  case MPI_HOST:
+  case MPI_IO:
+  case MPI_APPNUM:
+    *flag = 1;
+    *static_cast<void**>(attr_value) = &zero;
+    return MPI_SUCCESS;
+  case MPI_UNIVERSE_SIZE:
+    *flag = 1;
+    *static_cast<void**>(attr_value) = &smpi_universe_size;
+    return MPI_SUCCESS;
+  case MPI_LASTUSEDCODE:
+    *flag = 1;
+    *static_cast<void**>(attr_value) = &last_used_code;
+    return MPI_SUCCESS;
+  case MPI_TAG_UB:
+    *flag=1;
+    *static_cast<void**>(attr_value) = &tag_ub;
+    return MPI_SUCCESS;
+  case MPI_WTIME_IS_GLOBAL:
+    *flag = 1;
+    *static_cast<void**>(attr_value) = &one;
+    return MPI_SUCCESS;
+  default:
+    return comm->attr_get(keyval, attr_value, flag);
+  }
+}
+
+int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
+  if(keyval == MPI_TAG_UB||keyval == MPI_HOST||keyval == MPI_IO ||keyval == MPI_WTIME_IS_GLOBAL||keyval == MPI_APPNUM
+     ||keyval == MPI_UNIVERSE_SIZE||keyval == MPI_LASTUSEDCODE)
+    return MPI_ERR_ARG;
+  else if (comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+  else
+    return comm->attr_put(keyval, attr_value);
+}
+
+}
diff --git a/src/smpi/bindings/smpi_pmpi_group.cpp b/src/smpi/bindings/smpi_pmpi_group.cpp
new file mode 100644
index 0000000000..8adeecc422
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_group.cpp
@@ -0,0 +1,196 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package.
*/ + +#include "private.h" +#include "smpi_comm.hpp" +#include "smpi_coll.hpp" +#include "smpi_datatype_derived.hpp" +#include "smpi_op.hpp" +#include "smpi_process.hpp" + +XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi); + + +/* PMPI User level calls */ +extern "C" { // Obviously, the C MPI interface should use the C linkage + +int PMPI_Group_free(MPI_Group * group) +{ + if (group == nullptr) { + return MPI_ERR_ARG; + } else { + if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY) + simgrid::smpi::Group::unref(*group); + *group = MPI_GROUP_NULL; + return MPI_SUCCESS; + } +} + +int PMPI_Group_size(MPI_Group group, int *size) +{ + if (group == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (size == nullptr) { + return MPI_ERR_ARG; + } else { + *size = group->size(); + return MPI_SUCCESS; + } +} + +int PMPI_Group_rank(MPI_Group group, int *rank) +{ + if (group == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (rank == nullptr) { + return MPI_ERR_ARG; + } else { + *rank = group->rank(smpi_process()->index()); + return MPI_SUCCESS; + } +} + +int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, MPI_Group group2, int *ranks2) +{ + if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else { + for (int i = 0; i < n; i++) { + if(ranks1[i]==MPI_PROC_NULL){ + ranks2[i]=MPI_PROC_NULL; + }else{ + int index = group1->index(ranks1[i]); + ranks2[i] = group2->rank(index); + } + } + return MPI_SUCCESS; + } +} + +int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result) +{ + if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (result == nullptr) { + return MPI_ERR_ARG; + } else { + *result = group1->compare(group2); + return MPI_SUCCESS; + } +} + +int PMPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) +{ + + if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (newgroup == nullptr) { + return MPI_ERR_ARG; + } else { + return group1->group_union(group2, newgroup); + } +} + +int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) +{ + + if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (newgroup == nullptr) { + return MPI_ERR_ARG; + } else { + return group1->intersection(group2,newgroup); + } +} + +int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) +{ + if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (newgroup == nullptr) { + return MPI_ERR_ARG; + } else { + return group1->difference(group2,newgroup); + } +} + +int PMPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) +{ + if (group == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (newgroup == nullptr) { + return MPI_ERR_ARG; + } else { + return group->incl(n, ranks, newgroup); + } +} + +int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) +{ + if (group == MPI_GROUP_NULL) { + return MPI_ERR_GROUP; + } else if (newgroup == nullptr) { + return MPI_ERR_ARG; + } else { + if (n == 0) { + *newgroup = group; + if (group != MPI_COMM_WORLD->group() + && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY) + group->ref(); + return MPI_SUCCESS; + } else if (n == group->size()) { + *newgroup = MPI_GROUP_EMPTY; + return MPI_SUCCESS; + } else { + return group->excl(n,ranks,newgroup); + } + } +} + +int PMPI_Group_range_incl(MPI_Group group, int n, int 
ranges[][3], MPI_Group * newgroup)
+{
+  if (group == MPI_GROUP_NULL) {
+    return MPI_ERR_GROUP;
+  } else if (newgroup == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    if (n == 0) {
+      *newgroup = MPI_GROUP_EMPTY;
+      return MPI_SUCCESS;
+    } else {
+      return group->range_incl(n,ranges,newgroup);
+    }
+  }
+}
+
+int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup)
+{
+  if (group == MPI_GROUP_NULL) {
+    return MPI_ERR_GROUP;
+  } else if (newgroup == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    if (n == 0) {
+      *newgroup = group;
+      if (group != MPI_COMM_WORLD->group() && group != MPI_COMM_SELF->group() &&
+          group != MPI_GROUP_EMPTY)
+        group->ref();
+      return MPI_SUCCESS;
+    } else {
+      return group->range_excl(n,ranges,newgroup);
+    }
+  }
+}
+
+MPI_Group PMPI_Group_f2c(MPI_Fint group){
+  return simgrid::smpi::Group::f2c(group);
+}
+
+MPI_Fint PMPI_Group_c2f(MPI_Group group){
+  return group->c2f();
+}
+
+}
diff --git a/src/smpi/bindings/smpi_pmpi_info.cpp b/src/smpi/bindings/smpi_pmpi_info.cpp
new file mode 100644
index 0000000000..4cbd21c8f1
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_info.cpp
@@ -0,0 +1,85 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_info.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Info_create( MPI_Info *info){
+  if (info == nullptr)
+    return MPI_ERR_ARG;
+  *info = new simgrid::smpi::Info();
+  return MPI_SUCCESS;
+}
+
+int PMPI_Info_set( MPI_Info info, char *key, char *value){
+  if (info == nullptr || key == nullptr || value == nullptr)
+    return MPI_ERR_ARG;
+  info->set(key, value);
+  return MPI_SUCCESS;
+}
+
+int PMPI_Info_free( MPI_Info *info){
+  if (info == nullptr || *info==nullptr)
+    return MPI_ERR_ARG;
+  simgrid::smpi::Info::unref(*info);
+  *info=MPI_INFO_NULL;
+  return MPI_SUCCESS;
+}
+
+int PMPI_Info_get(MPI_Info info,char *key,int valuelen, char *value, int *flag){
+  *flag=false;
+  if (info == nullptr || key == nullptr || valuelen <0)
+    return MPI_ERR_ARG;
+  if (value == nullptr)
+    return MPI_ERR_INFO_VALUE;
+  return info->get(key, valuelen, value, flag);
+}
+
+int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
+  if (info == nullptr || newinfo==nullptr)
+    return MPI_ERR_ARG;
+  *newinfo = new simgrid::smpi::Info(info);
+  return MPI_SUCCESS;
+}
+
+int PMPI_Info_delete(MPI_Info info, char *key){
+  if (info == nullptr || key==nullptr)
+    return MPI_ERR_ARG;
+  return info->remove(key);
+}
+
+int PMPI_Info_get_nkeys( MPI_Info info, int *nkeys){
+  if (info == nullptr || nkeys==nullptr)
+    return MPI_ERR_ARG;
+  return info->get_nkeys(nkeys);
+}
+
+int PMPI_Info_get_nthkey( MPI_Info info, int n, char *key){
+  if (info == nullptr || key==nullptr || n<0 || n> MPI_MAX_INFO_KEY)
+    return MPI_ERR_ARG;
+  return info->get_nthkey(n, key);
+}
+
+int PMPI_Info_get_valuelen( MPI_Info info, char *key, int *valuelen, int *flag){
+  *flag=false;
+  if (info == nullptr || key == nullptr || valuelen==nullptr)
+    return MPI_ERR_ARG;
+  return info->get_valuelen(key, valuelen, flag);
+}
+
+MPI_Info PMPI_Info_f2c(MPI_Fint info){
+  return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
+}
+
+MPI_Fint PMPI_Info_c2f(MPI_Info info){
+  return info->c2f();
+}
+
+}
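The Info bindings above are a thin veneer over simgrid::smpi::Info; apart from the null and length guards, everything is delegated. A round-trip sketch, where the key and value are arbitrary examples:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      MPI_Info info;
      char value[64];
      int flag;
      MPI_Info_create(&info);
      MPI_Info_set(info, "host", "node0");               /* keys and values are plain strings */
      MPI_Info_get(info, "host", sizeof(value) - 1, value, &flag);
      if (flag)
        printf("host=%s\n", value);
      MPI_Info_free(&info);                              /* handle becomes MPI_INFO_NULL */
      MPI_Finalize();
      return 0;
    }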
diff --git a/src/smpi/bindings/smpi_pmpi_op.cpp b/src/smpi/bindings/smpi_pmpi_op.cpp
new file mode 100644
index 0000000000..013b78007e
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_op.cpp
@@ -0,0 +1,56 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_op.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Op_create(MPI_User_function * function, int commute, MPI_Op * op)
+{
+  if (function == nullptr || op == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *op = new simgrid::smpi::Op(function, (commute!=0));
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Op_free(MPI_Op * op)
+{
+  if (op == nullptr) {
+    return MPI_ERR_ARG;
+  } else if (*op == MPI_OP_NULL) {
+    return MPI_ERR_OP;
+  } else {
+    delete (*op);
+    *op = MPI_OP_NULL;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Op_commutative(MPI_Op op, int* commute){
+  if (op == MPI_OP_NULL) {
+    return MPI_ERR_OP;
+  } else if (commute==nullptr){
+    return MPI_ERR_ARG;
+  } else {
+    *commute = op->is_commutative();
+    return MPI_SUCCESS;
+  }
+}
+
+MPI_Op PMPI_Op_f2c(MPI_Fint op){
+  return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
+}
+
+MPI_Fint PMPI_Op_c2f(MPI_Op op){
+  return op->c2f();
+}
+
+}
diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp
new file mode 100644
index 0000000000..71cdf1b616
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_request.cpp
@@ -0,0 +1,787 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package.
*/ + +#include "private.h" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_request.hpp" +#include "smpi_process.hpp" + +XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi); + +/* PMPI User level calls */ +extern "C" { // Obviously, the C MPI interface should use the C linkage + +int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if (dst == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else { + *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request != nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + +int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if (src == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else { + *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request != nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + +int PMPI_Ssend_init(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if (dst == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else { + *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request != nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + +int PMPI_Start(MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr || *request == MPI_REQUEST_NULL) { + retval = MPI_ERR_REQUEST; + } else { + (*request)->start(); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Startall(int count, MPI_Request * requests) +{ + int retval; + smpi_bench_end(); + if (requests == nullptr) { + retval = MPI_ERR_ARG; + } else { + retval = MPI_SUCCESS; + for (int i = 0; i < count; i++) { + if(requests[i] == MPI_REQUEST_NULL) { + retval = MPI_ERR_REQUEST; + } + } + if(retval != MPI_ERR_REQUEST) { + simgrid::smpi::Request::startall(count, requests); + } + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Request_free(MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + if (*request == MPI_REQUEST_NULL) { + retval = MPI_ERR_ARG; + } else { + simgrid::smpi::Request::unref(request); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (src == 
MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ + retval = MPI_ERR_RANK; + } else if ((count < 0) || (buf==nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag<0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + + int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; + int src_traced = comm->group()->index(src); + + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_IRECV; + extra->src = src_traced; + extra->dst = rank; + int known=0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if(known==0) + dt_size_send = datatype->size(); + extra->send_size = count*dt_size_send; + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); + + *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request != nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + + +int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (dst == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (dst >= comm->group()->size() || dst <0){ + retval = MPI_ERR_RANK; + } else if ((count < 0) || (buf==nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag<0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; + int dst_traced = comm->group()->index(dst); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_ISEND; + extra->src = rank; + extra->dst = dst_traced; + int known=0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if(known==0) + dt_size_send = datatype->size(); + extra->send_size = count*dt_size_send; + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); + TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); + + *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request!=nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + +int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) +{ + int retval = 0; + + smpi_bench_end(); + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (dst == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (dst >= comm->group()->size() || dst <0){ + retval = MPI_ERR_RANK; + } else if ((count < 0)|| (buf==nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag<0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; + int dst_traced = comm->group()->index(dst); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_ISSEND; + extra->src = rank; + extra->dst = dst_traced; + int known=0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if(known==0) + dt_size_send = datatype->size(); + extra->send_size = count*dt_size_send; + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); + TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); + + *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + + smpi_bench_begin(); + if (retval != MPI_SUCCESS && request!=nullptr) + *request = MPI_REQUEST_NULL; + return retval; +} + +int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status) +{ + int retval = 0; + + smpi_bench_end(); + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (src == MPI_PROC_NULL) { + simgrid::smpi::Status::empty(status); + status->MPI_SOURCE = MPI_PROC_NULL; + retval = MPI_SUCCESS; + } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ + retval = MPI_ERR_RANK; + } else if ((count < 0) || (buf==nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag<0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; + int src_traced = comm->group()->index(src); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); + extra->type = TRACING_RECV; + extra->src = src_traced; + extra->dst = rank; + int known = 0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if (known == 0) + dt_size_send = datatype->size(); + extra->send_size = count * dt_size_send; + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); + + simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status); + retval = MPI_SUCCESS; + + // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) + if (status != MPI_STATUS_IGNORE) { + src_traced = comm->group()->index(status->MPI_SOURCE); + if (not TRACE_smpi_view_internals()) { + TRACE_smpi_recv(rank, src_traced, rank, tag); + } + } + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) +{ + int retval = 0; + + smpi_bench_end(); + + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (dst == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (dst >= comm->group()->size() || dst <0){ + retval = MPI_ERR_RANK; + } else if ((count < 0) || (buf == nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag < 0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; + int dst_traced = comm->group()->index(dst); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_SEND; + extra->src = rank; + extra->dst = dst_traced; + int known = 0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if (known == 0) { + dt_size_send = datatype->size(); + } + extra->send_size = count*dt_size_send; + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); + if (not TRACE_smpi_view_internals()) { + TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); + } + + simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { + int retval = 0; + + smpi_bench_end(); + + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (dst == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (dst >= comm->group()->size() || dst <0){ + retval = MPI_ERR_RANK; + } else if ((count < 0) || (buf==nullptr && count > 0)) { + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if(tag<0 && tag != MPI_ANY_TAG){ + retval = MPI_ERR_TAG; + } else { + int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; + int dst_traced = comm->group()->index(dst); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_SSEND; + extra->src = rank; + extra->dst = dst_traced; + int known = 0; + extra->datatype1 = encode_datatype(datatype, &known); + int dt_size_send = 1; + if(known == 0) { + dt_size_send = datatype->size(); + } + extra->send_size = count*dt_size_send; + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); + TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); + + simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void *recvbuf, + int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status * status) +{ + int retval = 0; + + smpi_bench_end(); + + if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (not sendtype->is_valid() || not recvtype->is_valid()) { + retval = MPI_ERR_TYPE; + } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) { + simgrid::smpi::Status::empty(status); + status->MPI_SOURCE = MPI_PROC_NULL; + retval = MPI_SUCCESS; + }else if (dst >= comm->group()->size() || dst <0 || + (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){ + retval = MPI_ERR_RANK; + } else if ((sendcount < 0 || recvcount<0) || + (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) { + retval = MPI_ERR_COUNT; + } else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){ + retval = MPI_ERR_TAG; + } else { + + int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; + int dst_traced = comm->group()->index(dst); + int src_traced = comm->group()->index(src); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_SENDRECV; + extra->src = src_traced; + extra->dst = dst_traced; + int known=0; + extra->datatype1 = encode_datatype(sendtype, &known); + int dt_size_send = 1; + if(known==0) + dt_size_send = sendtype->size(); + extra->send_size = sendcount*dt_size_send; + extra->datatype2 = encode_datatype(recvtype, &known); + int dt_size_recv = 1; + if(known==0) + dt_size_recv = recvtype->size(); + extra->recv_size = recvcount*dt_size_recv; + + TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); + TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size()); + + simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm, + status); + retval = MPI_SUCCESS; + + TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__); + TRACE_smpi_recv(rank, src_traced, rank, recvtag); + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, int sendtag, int src, int recvtag, + MPI_Comm comm, MPI_Status* status) +{ + int retval = 0; + if (not datatype->is_valid()) { + return MPI_ERR_TYPE; + } else if (count < 0) { + return MPI_ERR_COUNT; + } else { + int size = datatype->get_extent() * count; + void* recvbuf = xbt_new0(char, size); + retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status); + if(retval==MPI_SUCCESS){ + simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype); + } + xbt_free(recvbuf); + + } + return retval; +} + +int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status) +{ + int retval = 0; + smpi_bench_end(); + if (request == nullptr || flag == nullptr) { + retval = MPI_ERR_ARG; + } else if (*request == MPI_REQUEST_NULL) { + *flag= true; + simgrid::smpi::Status::empty(status); + retval = MPI_SUCCESS; + } else { + int rank = ((*request)->comm() != MPI_COMM_NULL) ? 
smpi_process()->index() : -1; + + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_TEST; + TRACE_smpi_testing_in(rank, extra); + + *flag = simgrid::smpi::Request::test(request,status); + + TRACE_smpi_testing_out(rank); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag, MPI_Status * status) +{ + int retval = 0; + + smpi_bench_end(); + if (index == nullptr || flag == nullptr) { + retval = MPI_ERR_ARG; + } else { + *flag = simgrid::smpi::Request::testany(count, requests, index, status); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* statuses) +{ + int retval = 0; + + smpi_bench_end(); + if (flag == nullptr) { + retval = MPI_ERR_ARG; + } else { + *flag = simgrid::smpi::Request::testall(count, requests, statuses); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status* status) { + int retval = 0; + smpi_bench_end(); + + if (status == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (source == MPI_PROC_NULL) { + simgrid::smpi::Status::empty(status); + status->MPI_SOURCE = MPI_PROC_NULL; + retval = MPI_SUCCESS; + } else { + simgrid::smpi::Request::probe(source, tag, comm, status); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) { + int retval = 0; + smpi_bench_end(); + + if (flag == nullptr) { + retval = MPI_ERR_ARG; + } else if (comm == MPI_COMM_NULL) { + retval = MPI_ERR_COMM; + } else if (source == MPI_PROC_NULL) { + *flag=true; + simgrid::smpi::Status::empty(status); + status->MPI_SOURCE = MPI_PROC_NULL; + retval = MPI_SUCCESS; + } else { + simgrid::smpi::Request::iprobe(source, tag, comm, flag, status); + retval = MPI_SUCCESS; + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Wait(MPI_Request * request, MPI_Status * status) +{ + int retval = 0; + + smpi_bench_end(); + + simgrid::smpi::Status::empty(status); + + if (request == nullptr) { + retval = MPI_ERR_ARG; + } else if (*request == MPI_REQUEST_NULL) { + retval = MPI_SUCCESS; + } else { + + int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1; + + int src_traced = (*request)->src(); + int dst_traced = (*request)->dst(); + int tag_traced= (*request)->tag(); + MPI_Comm comm = (*request)->comm(); + int is_wait_for_receive = ((*request)->flags() & RECV); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_WAIT; + TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); + + simgrid::smpi::Request::wait(request, status); + retval = MPI_SUCCESS; + + //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) + TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__); + if (is_wait_for_receive) { + if(src_traced==MPI_ANY_SOURCE) + src_traced = (status!=MPI_STATUS_IGNORE) ? 
+ comm->group()->rank(status->MPI_SOURCE) : + src_traced; + TRACE_smpi_recv(rank, src_traced, dst_traced, tag_traced); + } + } + + smpi_bench_begin(); + return retval; +} + +int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * status) +{ + if (index == nullptr) + return MPI_ERR_ARG; + + smpi_bench_end(); + //save requests information for tracing + typedef struct { + int src; + int dst; + int recv; + int tag; + MPI_Comm comm; + } savedvalstype; + savedvalstype* savedvals=nullptr; + if(count>0){ + savedvals = xbt_new0(savedvalstype, count); + } + for (int i = 0; i < count; i++) { + MPI_Request req = requests[i]; //already received requests are no longer valid + if (req) { + savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), req->comm()}; + } + } + int rank_traced = smpi_process()->index(); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_WAITANY; + extra->send_size=count; + TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); + + *index = simgrid::smpi::Request::waitany(count, requests, status); + + if(*index!=MPI_UNDEFINED){ + int src_traced = savedvals[*index].src; + //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) + int dst_traced = savedvals[*index].dst; + int is_wait_for_receive = savedvals[*index].recv; + if (is_wait_for_receive) { + if(savedvals[*index].src==MPI_ANY_SOURCE) + src_traced = (status != MPI_STATUSES_IGNORE) + ? savedvals[*index].comm->group()->rank(status->MPI_SOURCE) + : savedvals[*index].src; + TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[*index].tag); + } + TRACE_smpi_ptp_out(rank_traced, src_traced, dst_traced, __FUNCTION__); + } + xbt_free(savedvals); + + smpi_bench_begin(); + return MPI_SUCCESS; +} + +int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) +{ + smpi_bench_end(); + //save information from requests + typedef struct { + int src; + int dst; + int recv; + int tag; + int valid; + MPI_Comm comm; + } savedvalstype; + savedvalstype* savedvals=xbt_new0(savedvalstype, count); + + for (int i = 0; i < count; i++) { + MPI_Request req = requests[i]; + if(req!=MPI_REQUEST_NULL){ + savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), 1, req->comm()}; + }else{ + savedvals[i].valid=0; + } + } + int rank_traced = smpi_process()->index(); + instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); + extra->type = TRACING_WAITALL; + extra->send_size=count; + TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); + + int retval = simgrid::smpi::Request::waitall(count, requests, status); + + for (int i = 0; i < count; i++) { + if(savedvals[i].valid){ + //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) + int src_traced = savedvals[i].src; + int dst_traced = savedvals[i].dst; + int is_wait_for_receive = savedvals[i].recv; + if (is_wait_for_receive) { + if(src_traced==MPI_ANY_SOURCE) + src_traced = (status!=MPI_STATUSES_IGNORE) ? 
+              savedvals[i].comm->group()->rank(status[i].MPI_SOURCE) : savedvals[i].src;
+        TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[i].tag);
+      }
+    }
+  }
+  TRACE_smpi_ptp_out(rank_traced, -1, -1, __FUNCTION__);
+  xbt_free(savedvals);
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[])
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (outcount == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[])
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (outcount == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+MPI_Request PMPI_Request_f2c(MPI_Fint request){
+  return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
+}
+
+MPI_Fint PMPI_Request_c2f(MPI_Request request) {
+  return request->c2f();
+}
+
+}
diff --git a/src/smpi/bindings/smpi_pmpi_topo.cpp b/src/smpi/bindings/smpi_pmpi_topo.cpp
new file mode 100644
index 0000000000..6864b0f82e
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_topo.cpp
@@ -0,0 +1,136 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+/* The topo part of MPI_COMM_WORLD should always be nullptr.
+ * When other topologies are implemented, we should not only check that the topology is not nullptr, but also that
+ * it is the right topology type (we will then have to add a MPIR_Topo_Type field, and replace the MPI_Topology
+ * field by a union). */
+
+int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
+  if (comm_old == MPI_COMM_NULL){
+    return MPI_ERR_COMM;
+  } else if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr) {
+    return MPI_ERR_ARG;
+  } else{
+    simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
+    if(*comm_cart==MPI_COMM_NULL)
+      delete topo;
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (coords == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->rank(coords, rank);
+}
+
+int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (source == nullptr || dest == nullptr || direction < 0 ) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->shift(direction, displ, source, dest);
+}
+
+int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (rank < 0 || rank >= comm->size()) {
+    return MPI_ERR_RANK;
+  }
+  if (maxdims <= 0) {
+    return MPI_ERR_ARG;
+  }
+  if(coords == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->coords(rank, maxdims, coords);
+}
+
+int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+  if(comm == nullptr || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if(maxdims <= 0 || dims == nullptr || periods == nullptr || coords == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->get(maxdims, dims, periods, coords);
+}
+
+int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
+  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (ndims == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  return topo->dim_get(ndims);
+}
+
+int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
+  if(dims == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  if (ndims < 1 || nnodes < 1) {
+    return MPI_ERR_DIMS;
+  }
+  return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims);
+}
+
+int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
+  if(comm == MPI_COMM_NULL || comm->topo() == nullptr) {
+    return MPI_ERR_TOPOLOGY;
+  }
+  if (comm_new == nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
+  if (topo==nullptr) {
+    return MPI_ERR_ARG;
+  }
+  MPIR_Cart_Topology cart = topo->sub(remain_dims, comm_new);
+  if(*comm_new==MPI_COMM_NULL)
+    delete cart;
+  if(cart==nullptr)
+    return MPI_ERR_ARG;
+  return MPI_SUCCESS;
+}
+
+}
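Consistent with the comment at the top of this file, only Cartesian topologies are supported: every query casts comm->topo() to MPIR_Cart_Topology and otherwise fails with MPI_ERR_TOPOLOGY or MPI_ERR_ARG. A 2-D periodic grid sketch; the process count just needs to be factorable by MPI_Dims_create:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      int dims[2] = {0, 0};
      MPI_Dims_create(size, 2, dims);            /* factor the ranks into a 2-D grid */
      int periods[2] = {1, 1};                   /* wrap around in both dimensions */
      MPI_Comm grid;
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &grid);
      int left, right;
      MPI_Cart_shift(grid, 0, 1, &left, &right); /* neighbours along dimension 0 */
      MPI_Comm_free(&grid);
      MPI_Finalize();
      return 0;
    }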
diff --git a/src/smpi/bindings/smpi_pmpi_type.cpp b/src/smpi/bindings/smpi_pmpi_type.cpp
new file mode 100644
index 0000000000..e150b2b418
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_type.cpp
@@ -0,0 +1,357 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_datatype_derived.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Type_free(MPI_Datatype * datatype)
+{
+  /* Freeing a predefined datatype is an error according to the standard, and should be checked for */
+  if (*datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_ARG;
+  } else {
+    simgrid::smpi::Datatype::unref(*datatype);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_size(MPI_Datatype datatype, int *size)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (size == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *size = static_cast<int>(datatype->size());
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (size == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *size = static_cast<MPI_Count>(datatype->size());
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (lb == nullptr || extent == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    return datatype->extent(lb, extent);
+  }
+}
+
+int PMPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent)
+{
+  return PMPI_Type_get_extent(datatype, lb, extent);
+}
+
+int PMPI_Type_extent(MPI_Datatype datatype, MPI_Aint * extent)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (extent == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *extent = datatype->get_extent();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_lb(MPI_Datatype datatype, MPI_Aint * disp)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (disp == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *disp = datatype->lb();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_ub(MPI_Datatype datatype, MPI_Aint * disp)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (disp == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    *disp = datatype->ub();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
+  int retval = MPI_SUCCESS;
+  if (datatype == MPI_DATATYPE_NULL) {
+    retval=MPI_ERR_TYPE;
+  } else {
+    *newtype = new simgrid::smpi::Datatype(datatype, &retval);
+    // on error during duplication, free the new datatype
+    if(retval!=MPI_SUCCESS){
+      simgrid::smpi::Datatype::unref(*newtype);
+      *newtype = MPI_DATATYPE_NULL;
+    }
+  }
+  return retval;
+}
+
+int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type);
+  }
+}
+
+int PMPI_Type_commit(MPI_Datatype* datatype) {
+  if (datatype == nullptr || *datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else {
+    (*datatype)->commit();
+    return MPI_SUCCESS;
+  }
+}
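A hedged sketch (illustrative names, not from this commit) of how the size/extent/commit calls above relate for a simple contiguous type:

    /* Illustrative only: size is the payload, extent is the layout span. */
    #include <mpi.h>
    static void contig_demo(void)
    {
      MPI_Datatype quad;
      int size;
      MPI_Aint lb, extent;
      MPI_Type_contiguous(4, MPI_INT, &quad);
      MPI_Type_commit(&quad);                  /* required before use */
      MPI_Type_size(quad, &size);              /* 4 * sizeof(int) */
      MPI_Type_get_extent(quad, &lb, &extent); /* lb == 0, extent == size here */
      MPI_Type_free(&quad);
    }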
+
+int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0 || blocklen<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type);
+  }
+}
+
+int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0 || blocklen<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
+  }
+}
+
+int PMPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  return MPI_Type_hvector(count, blocklen, stride, old_type, new_type);
+}
+
+int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+  }
+}
+
+int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+  }
+}
+
+int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI_Datatype old_type,
+                                   MPI_Datatype* new_type)
+{
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    int* blocklens=static_cast<int*>(xbt_malloc(blocklength*count*sizeof(int)));
+    for (int i = 0; i < count; i++)
+      blocklens[i]=blocklength;
+    int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+    xbt_free(blocklens);
+    return retval;
+  }
+}
+
+int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
+{
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+  }
+}
+
+int PMPI_Type_create_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type,
+                              MPI_Datatype* new_type) {
+  return PMPI_Type_hindexed(count, blocklens, indices, old_type, new_type);
+}
+
+int PMPI_Type_create_hindexed_block(int count, int blocklength, MPI_Aint* indices, MPI_Datatype old_type,
+                                    MPI_Datatype* new_type) {
+  if (old_type == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int));
+    for (int i = 0; i < count; i++)
+      blocklens[i] = blocklength;
+    int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+    xbt_free(blocklens);
+    return retval;
+  }
+}
+
+int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type) {
+  if (count<0){
+    return MPI_ERR_COUNT;
+  } else {
+    return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type);
+  }
+}
+
+int PMPI_Type_create_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types,
+                            MPI_Datatype* new_type) {
+  return PMPI_Type_struct(count, blocklens, indices, old_types, new_type);
+}
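A hedged usage sketch (illustrative names, not from this commit) combining the struct constructor above with MPI_Type_create_resized, defined just below, to describe a padded C struct:

    /* Illustrative only: a type matching struct {int tag; double val[3];}. */
    #include <mpi.h>
    #include <stddef.h>
    struct sample { int tag; double val[3]; };
    static void struct_demo(void)
    {
      int blocklens[2] = {1, 3};
      MPI_Aint disps[2] = {offsetof(struct sample, tag), offsetof(struct sample, val)};
      MPI_Datatype types[2] = {MPI_INT, MPI_DOUBLE};
      MPI_Datatype dt, dt_resized;
      MPI_Type_create_struct(2, blocklens, disps, types, &dt);
      /* pad the extent to sizeof(struct sample) so arrays stride correctly */
      MPI_Type_create_resized(dt, 0, sizeof(struct sample), &dt_resized);
      MPI_Type_commit(&dt_resized);
      MPI_Type_free(&dt);
      MPI_Type_free(&dt_resized);
    }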
+
+int PMPI_Type_create_resized(MPI_Datatype oldtype, MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
+  if (oldtype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  }
+  int blocks[3] = {1, 1, 1};
+  MPI_Aint disps[3] = {lb, 0, lb + extent};
+  MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
+
+  *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
+
+  (*newtype)->addflag(~DT_FLAG_COMMITED);
+  return MPI_SUCCESS;
+}
+
+
+int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (name == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    datatype->set_name(name);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
+{
+  if (datatype == MPI_DATATYPE_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (name == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    datatype->get_name(name, len);
+    return MPI_SUCCESS;
+  }
+}
+
+MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){
+  return static_cast<MPI_Datatype>(simgrid::smpi::F2C::f2c(datatype));
+}
+
+MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){
+  return datatype->c2f();
+}
+
+int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
+{
+  if (type==MPI_DATATYPE_NULL)
+    return MPI_ERR_TYPE;
+  else
+    return type->attr_get(type_keyval, attribute_val, flag);
+}
+
+int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
+{
+  if (type==MPI_DATATYPE_NULL)
+    return MPI_ERR_TYPE;
+  else
+    return type->attr_put(type_keyval, attribute_val);
+}
+
+int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
+{
+  if (type==MPI_DATATYPE_NULL)
+    return MPI_ERR_TYPE;
+  else
+    return type->attr_delete(type_keyval);
+}
+
+int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval,
+                            void* extra_state)
+{
+  smpi_copy_fn _copy_fn={nullptr,copy_fn,nullptr};
+  smpi_delete_fn _delete_fn={nullptr,delete_fn,nullptr};
+  return simgrid::smpi::Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state);
+}
+
+int PMPI_Type_free_keyval(int* keyval) {
+  return simgrid::smpi::Keyval::keyval_free(keyval);
+}
+
+int PMPI_Unpack(void* inbuf, int incount, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
+  if(incount<0 || outcount < 0 || inbuf==nullptr || outbuf==nullptr)
+    return MPI_ERR_ARG;
+  if (not type->is_valid())
+    return MPI_ERR_TYPE;
+  if(comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+  return type->unpack(inbuf, incount, position, outbuf, outcount, comm);
+}
+
+int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
+  if(incount<0 || outcount < 0 || inbuf==nullptr || outbuf==nullptr)
+    return MPI_ERR_ARG;
+  if (not type->is_valid())
+    return MPI_ERR_TYPE;
+  if(comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+  return type->pack(inbuf, incount, outbuf, outcount, position, comm);
+}
+
+int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
+  if(incount<0)
+    return MPI_ERR_ARG;
+  if (not datatype->is_valid())
+    return MPI_ERR_TYPE;
+  if(comm==MPI_COMM_NULL)
+    return MPI_ERR_COMM;
+
+  *size=incount*datatype->size();
+
+  return MPI_SUCCESS;
+}
+
+}
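A hedged round-trip sketch (illustrative names, not from this commit) of the Pack/Unpack/Pack_size bindings above; note that this implementation computes MPI_Pack_size as exactly incount times the datatype size:

    /* Illustrative only: round-trip four ints through MPI_Pack/MPI_Unpack. */
    #include <mpi.h>
    #include <stdlib.h>
    static void pack_demo(MPI_Comm comm)
    {
      int vals[4] = {1, 2, 3, 4};
      int out[4];
      int bufsize;
      int pos = 0;
      MPI_Pack_size(4, MPI_INT, comm, &bufsize); /* here exactly 4*sizeof(int) */
      char* buf = (char*)malloc(bufsize);
      MPI_Pack(vals, 4, MPI_INT, buf, bufsize, &pos, comm);
      pos = 0; /* rewind before unpacking */
      MPI_Unpack(buf, bufsize, &pos, out, 4, MPI_INT, comm);
      free(buf);
    }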
diff --git a/src/smpi/bindings/smpi_pmpi_win.cpp b/src/smpi/bindings/smpi_pmpi_win.cpp
new file mode 100644
index 0000000000..aa82058617
--- /dev/null
+++ b/src/smpi/bindings/smpi_pmpi_win.cpp
@@ -0,0 +1,783 @@
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype_derived.hpp"
+#include "smpi_op.hpp"
+#include "smpi_process.hpp"
+#include "smpi_win.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+/* PMPI User level calls */
+extern "C" { // Obviously, the C MPI interface should use the C linkage
+
+int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
+  int retval = 0;
+  smpi_bench_end();
+  if (comm == MPI_COMM_NULL) {
+    retval= MPI_ERR_COMM;
+  }else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){
+    retval= MPI_ERR_OTHER;
+  }else{
+    *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
+  int retval = 0;
+  smpi_bench_end();
+  if (comm == MPI_COMM_NULL) {
+    retval= MPI_ERR_COMM;
+  }else if (disp_unit <= 0 || size < 0 ){
+    retval= MPI_ERR_OTHER;
+  }else{
+    void* ptr = xbt_malloc(size);
+    if(ptr==nullptr)
+      return MPI_ERR_NO_MEM;
+    *static_cast<void**>(base) = ptr;
+    *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm, 1);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
+  int retval = 0;
+  smpi_bench_end();
+  if (comm == MPI_COMM_NULL) {
+    retval= MPI_ERR_COMM;
+  }else{
+    *win = new simgrid::smpi::Win(info, comm);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
+  int retval = 0;
+  smpi_bench_end();
+  if(win == MPI_WIN_NULL){
+    retval = MPI_ERR_WIN;
+  } else if ((base == nullptr && size != 0) || size < 0 ){
+    retval= MPI_ERR_OTHER;
+  }else{
+    retval = win->attach(base, size);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_detach(MPI_Win win, void *base){
+  int retval = 0;
+  smpi_bench_end();
+  if(win == MPI_WIN_NULL){
+    retval = MPI_ERR_WIN;
+  } else if (base == nullptr){
+    retval= MPI_ERR_OTHER;
+  }else{
+    retval = win->detach(base);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+
+int PMPI_Win_free( MPI_Win* win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == nullptr || *win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  }else{
+    delete *win;
+    retval=MPI_SUCCESS;
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_set_name(MPI_Win win, char * name)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_TYPE;
+  } else if (name == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    win->set_name(name);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Win_get_name(MPI_Win win, char * name, int* len)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_WIN;
+  } else if (name == nullptr) {
+    return MPI_ERR_ARG;
+  } else {
+    win->get_name(name, len);
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Win_get_info(MPI_Win win, MPI_Info* info)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_WIN;
+  } else {
+    *info = win->info();
+    return MPI_SUCCESS;
+  }
+}
+
+int PMPI_Win_set_info(MPI_Win win, MPI_Info info)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_TYPE;
+  } else {
+ win->set_info(info); + return MPI_SUCCESS; + } +} + +int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){ + if (win == MPI_WIN_NULL) { + return MPI_ERR_WIN; + }else { + win->get_group(group); + (*group)->ref(); + return MPI_SUCCESS; + } +} + +int PMPI_Win_fence( int assert, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else { + int rank = smpi_process()->index(); + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr); + retval = win->fence(assert); + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if(request == nullptr){ + retval = MPI_ERR_REQUEST; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype, request); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else 
if (target_rank == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int dst_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr); + TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size()); + + retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype); + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if(request == nullptr){ + retval = MPI_ERR_REQUEST; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int dst_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr); + TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size()); + + retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype, request); + + TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if (op == MPI_OP_NULL) { + retval = MPI_ERR_OP; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = 
group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype, op); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0) || + (origin_addr==nullptr && origin_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if (op == MPI_OP_NULL) { + retval = MPI_ERR_OP; + } else if(request == nullptr){ + retval = MPI_ERR_REQUEST; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, + target_datatype, op, request); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, +int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, +MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0 || result_count <0) || + (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) || + (result_addr==nullptr && result_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) || + (not target_datatype->is_valid()) || (not result_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if (op == MPI_OP_NULL) { + retval = MPI_ERR_OP; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr, + result_count, result_datatype, target_rank, target_disp, + target_count, target_datatype, op); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + + +int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void 
*result_addr, +int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, +MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + *request = MPI_REQUEST_NULL; + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if ((origin_count < 0 || target_count < 0 || result_count <0) || + (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) || + (result_addr==nullptr && result_count > 0)){ + retval = MPI_ERR_COUNT; + } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) || + (not target_datatype->is_valid()) || (not result_datatype->is_valid())) { + retval = MPI_ERR_TYPE; + } else if (op == MPI_OP_NULL) { + retval = MPI_ERR_OP; + } else if(request == nullptr){ + retval = MPI_ERR_REQUEST; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr, + result_count, result_datatype, target_rank, target_disp, + target_count, target_datatype, op, request); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){ + return PMPI_Get_accumulate(origin_addr, origin_addr==nullptr?0:1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win); +} + +int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr, + void *result_addr, MPI_Datatype datatype, int target_rank, + MPI_Aint target_disp, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (target_rank == MPI_PROC_NULL) { + retval = MPI_SUCCESS; + } else if (target_rank <0){ + retval = MPI_ERR_RANK; + } else if (win->dynamic()==0 && target_disp <0){ + //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address + retval = MPI_ERR_ARG; + } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){ + retval = MPI_ERR_COUNT; + } else if (not datatype->is_valid()) { + retval = MPI_ERR_TYPE; + } else { + int rank = smpi_process()->index(); + MPI_Group group; + win->get_group(&group); + int src_traced = group->index(target_rank); + TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); + + retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype, + target_rank, target_disp); + + TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (group==MPI_GROUP_NULL){ + retval = MPI_ERR_GROUP; + } else { + int rank = smpi_process()->index(); + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr); + retval = win->post(group,assert); + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + smpi_bench_begin(); 
+ return retval; +} + +int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (group==MPI_GROUP_NULL){ + retval = MPI_ERR_GROUP; + } else { + int rank = smpi_process()->index(); + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr); + retval = win->start(group,assert); + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_complete(MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else { + int rank = smpi_process()->index(); + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr); + + retval = win->complete(); + + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_wait(MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else { + int rank = smpi_process()->index(); + TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr); + + retval = win->wait(); + + TRACE_smpi_collective_out(rank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (lock_type != MPI_LOCK_EXCLUSIVE && + lock_type != MPI_LOCK_SHARED) { + retval = MPI_ERR_LOCKTYPE; + } else if (rank == MPI_PROC_NULL){ + retval = MPI_SUCCESS; + } else { + int myrank = smpi_process()->index(); + TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr); + retval = win->lock(lock_type,rank,assert); + TRACE_smpi_collective_out(myrank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_unlock(int rank, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (rank == MPI_PROC_NULL){ + retval = MPI_SUCCESS; + } else { + int myrank = smpi_process()->index(); + TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr); + retval = win->unlock(rank); + TRACE_smpi_collective_out(myrank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_lock_all(int assert, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else { + int myrank = smpi_process()->index(); + TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr); + retval = win->lock_all(assert); + TRACE_smpi_collective_out(myrank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_unlock_all(MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else { + int myrank = smpi_process()->index(); + TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr); + retval = win->unlock_all(); + TRACE_smpi_collective_out(myrank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_flush(int rank, MPI_Win win){ + int retval = 0; + smpi_bench_end(); + if (win == MPI_WIN_NULL) { + retval = MPI_ERR_WIN; + } else if (rank == MPI_PROC_NULL){ + retval = MPI_SUCCESS; + } else { + int myrank = smpi_process()->index(); + TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr); + retval = win->flush(rank); + TRACE_smpi_collective_out(myrank, -1, __FUNCTION__); + } + smpi_bench_begin(); + return retval; +} + +int PMPI_Win_flush_local(int rank, MPI_Win win){ + int retval = 0; + 
  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL){
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local(rank);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_flush_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Win_flush_local_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+
+int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag)
+{
+  static MPI_Aint size;
+  static int disp_unit;
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_TYPE;
+  else{
+    switch (keyval) {
+      case MPI_WIN_BASE :
+        *static_cast<void**>(attribute_val) = win->base();
+        *flag = 1;
+        return MPI_SUCCESS;
+      case MPI_WIN_SIZE :
+        size = win->size();
+        *static_cast<MPI_Aint**>(attribute_val) = &size;
+        *flag = 1;
+        return MPI_SUCCESS;
+      case MPI_WIN_DISP_UNIT :
+        disp_unit=win->disp_unit();
+        *static_cast<int**>(attribute_val) = &disp_unit;
+        *flag = 1;
+        return MPI_SUCCESS;
+      default:
+        return win->attr_get(keyval, attribute_val, flag);
+    }
+}
+
+}
+
+int PMPI_Win_set_attr (MPI_Win win, int type_keyval, void *attribute_val)
+{
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_TYPE;
+  else
+    return win->attr_put(type_keyval, attribute_val);
+}
+
+int PMPI_Win_delete_attr (MPI_Win win, int type_keyval)
+{
+  if (win==MPI_WIN_NULL)
+    return MPI_ERR_TYPE;
+  else
+    return win->attr_delete(type_keyval);
+}
+
+int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval,
+                           void* extra_state)
+{
+  smpi_copy_fn _copy_fn={nullptr, nullptr, copy_fn};
+  smpi_delete_fn _delete_fn={nullptr, nullptr, delete_fn};
+  return simgrid::smpi::Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state);
+}
+
+int PMPI_Win_free_keyval(int* keyval) {
+  return simgrid::smpi::Keyval::keyval_free(keyval);
+}
+
+MPI_Win PMPI_Win_f2c(MPI_Fint win){
+  return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
+}
+
+MPI_Fint PMPI_Win_c2f(MPI_Win win){
+  return win->c2f();
+}
+
+}
diff --git a/src/smpi/colls/allgather/allgather-rdb.cpp b/src/smpi/colls/allgather/allgather-rdb.cpp
index 1c028e8178..5e1d47b398 100644
--- a/src/smpi/colls/allgather/allgather-rdb.cpp
+++ b/src/smpi/colls/allgather/allgather-rdb.cpp
@@ -4,7 +4,7 @@
 * under the terms of the license (GNU LGPL) which comes with this package.
*/ #include "../colls_private.h" -#include "src/smpi/smpi_status.hpp" +#include "smpi_status.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/colls/allgatherv/allgatherv-mpich-rdb.cpp b/src/smpi/colls/allgatherv/allgatherv-mpich-rdb.cpp index fb374b9073..9226be163e 100644 --- a/src/smpi/colls/allgatherv/allgatherv-mpich-rdb.cpp +++ b/src/smpi/colls/allgatherv/allgatherv-mpich-rdb.cpp @@ -7,7 +7,7 @@ * recursive doubling algorithm */ #include "../colls_private.h" -#include "src/smpi/smpi_status.hpp" +#include "smpi_status.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/colls/alltoall/alltoall-pair.cpp b/src/smpi/colls/alltoall/alltoall-pair.cpp index 24b1a75eaf..2dd6e00c81 100644 --- a/src/smpi/colls/alltoall/alltoall-pair.cpp +++ b/src/smpi/colls/alltoall/alltoall-pair.cpp @@ -5,7 +5,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "../colls_private.h" -#include "src/smpi/smpi_win.hpp" +#include "smpi_win.hpp" /***************************************************************************** diff --git a/src/smpi/colls/alltoall/alltoall-rdb.cpp b/src/smpi/colls/alltoall/alltoall-rdb.cpp index be4860d8d1..5067aa75ff 100644 --- a/src/smpi/colls/alltoall/alltoall-rdb.cpp +++ b/src/smpi/colls/alltoall/alltoall-rdb.cpp @@ -5,7 +5,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "../colls_private.h" -#include "src/smpi/smpi_status.hpp" +#include "smpi_status.hpp" /***************************************************************************** diff --git a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp index 3466fdf996..6b304ade5f 100644 --- a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp +++ b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp @@ -4,7 +4,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "../colls_private.h" -#include "src/smpi/smpi_status.hpp" +#include "smpi_status.hpp" /***************************************************************************** diff --git a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp index f81694d4d1..f48e31a536 100644 --- a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp +++ b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp @@ -4,7 +4,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. 
*/ #include "../colls_private.h" -#include "src/smpi/smpi_status.hpp" +#include "smpi_status.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/colls/colls_private.h b/src/smpi/colls/colls_private.h index c621957823..fc28459fbd 100644 --- a/src/smpi/colls/colls_private.h +++ b/src/smpi/colls/colls_private.h @@ -8,12 +8,12 @@ #include #include "smpi/mpi.h" -#include "src/smpi/private.h" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_request.hpp" +#include "private.h" +#include "smpi_coll.hpp" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_op.hpp" +#include "smpi_request.hpp" XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_colls); diff --git a/src/smpi/colls/smpi_automatic_selector.cpp b/src/smpi/colls/smpi_automatic_selector.cpp index 9afee1433c..b69b56ccba 100644 --- a/src/smpi/colls/smpi_automatic_selector.cpp +++ b/src/smpi/colls/smpi_automatic_selector.cpp @@ -9,7 +9,7 @@ #include #include "colls_private.h" -#include "src/smpi/smpi_process.hpp" +#include "smpi_process.hpp" //attempt to do a quick autotuning version of the collective, diff --git a/src/smpi/smpi_coll.cpp b/src/smpi/colls/smpi_coll.cpp similarity index 98% rename from src/smpi/smpi_coll.cpp rename to src/smpi/colls/smpi_coll.cpp index a26b1f7ed0..a0760feabf 100644 --- a/src/smpi/smpi_coll.cpp +++ b/src/smpi/colls/smpi_coll.cpp @@ -5,12 +5,12 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/private.h" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_request.hpp" +#include "private.h" +#include "smpi_coll.hpp" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_op.hpp" +#include "smpi_request.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)"); diff --git a/src/smpi/colls/smpi_default_selector.cpp b/src/smpi/colls/smpi_default_selector.cpp index d674e4ad56..d3736cc81b 100644 --- a/src/smpi/colls/smpi_default_selector.cpp +++ b/src/smpi/colls/smpi_default_selector.cpp @@ -7,7 +7,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "colls_private.h" -#include "src/smpi/smpi_process.hpp" +#include "smpi_process.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/SmpiHost.hpp b/src/smpi/include/SmpiHost.hpp similarity index 100% rename from src/smpi/SmpiHost.hpp rename to src/smpi/include/SmpiHost.hpp diff --git a/src/smpi/private.h b/src/smpi/include/private.h similarity index 87% rename from src/smpi/private.h rename to src/smpi/include/private.h index 7caa1763f1..0b713bee8f 100644 --- a/src/smpi/private.h +++ b/src/smpi/include/private.h @@ -45,6 +45,13 @@ enum smpi_process_state{ //SMPI_RMA_TAG has to be the smallest one, as it will be decremented for accumulate ordering. #define SMPI_RMA_TAG -6666 +/* Convert between Fortran and C */ + +#define FORT_BOTTOM(addr) ((*(int*)addr) == -200 ? MPI_BOTTOM : (void*)addr) +#define FORT_IN_PLACE(addr) ((*(int*)addr) == -100 ? MPI_IN_PLACE : (void*)addr) +#define FORT_STATUS_IGNORE(addr) (static_cast((*(int*)addr) == -300 ? MPI_STATUS_IGNORE : (void*)addr)) +#define FORT_STATUSES_IGNORE(addr) (static_cast((*(int*)addr) == -400 ? 
MPI_STATUSES_IGNORE : (void*)addr)) + extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED; typedef SMPI_Cart_topology *MPIR_Cart_Topology; @@ -114,6 +121,7 @@ void mpi_comm_free_(int* comm, int* ierr); void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr); void mpi_group_incl_(int* group, int* n, int* key, int* group_out, int* ierr) ; void mpi_comm_group_(int* comm, int* group_out, int* ierr); +void mpi_comm_create_group_ (int* comm, int* group, int, int* comm_out, int* ierr); void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr); void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr); void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr); @@ -160,7 +168,6 @@ void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr); void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr ); void mpi_type_commit_(int* datatype, int* ierr); void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr); -void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr); void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr); void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr); void mpi_type_free_(int* datatype, int* ierr); @@ -175,6 +182,26 @@ void mpi_win_post_(int* group, int assert, int* win, int* ierr); void mpi_win_start_(int* group, int assert, int* win, int* ierr); void mpi_win_complete_(int* win, int* ierr); void mpi_win_wait_(int* win, int* ierr); +void mpi_win_allocate_( MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr); +void mpi_win_attach_(int* win, int* base, MPI_Aint* size, int* ierr); +void mpi_win_create_dynamic_( int* info, int* comm, int *win, int* ierr); +void mpi_win_detach_(int* win, int* base, int* ierr); +void mpi_win_set_info_(int* win, int* info, int* ierr); +void mpi_win_get_info_(int* win, int* info, int* ierr); +void mpi_win_get_group_(int* win, int* group, int* ierr); +void mpi_win_get_attr_(int* win, int* type_keyval, void* attribute_val, int* flag, int* ierr); +void mpi_win_set_attr_(int* win, int* type_keyval, void* att, int* ierr); +void mpi_win_delete_attr_(int* win, int* comm_keyval, int* ierr); +void mpi_win_create_keyval_(void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr); +void mpi_win_free_keyval_(int* keyval, int* ierr); +void mpi_win_lock_(int* lock_type, int* rank, int* assert, int* win, int* ierr); +void mpi_win_lock_all_(int* assert, int* win, int* ierr); +void mpi_win_unlock_(int* rank, int* win, int* ierr); +void mpi_win_unlock_all_(int* win, int* ierr); +void mpi_win_flush_(int* rank, int* win, int* ierr); +void mpi_win_flush_local_(int* rank, int* win, int* ierr); +void mpi_win_flush_all_(int* win, int* ierr); +void mpi_win_flush_local_all_(int* win, int* ierr); void mpi_info_create_( int *info, int* ierr); void mpi_info_set_( int *info, char *key, char *value, int* ierr, unsigned int keylen, unsigned int valuelen); void mpi_info_free_(int* info, int* ierr); @@ -182,8 +209,23 @@ void mpi_get_( int *origin_addr, int* origin_count, int* origin_datatype, int* t MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* ierr); void mpi_put_( int *origin_addr, 
int* origin_count, int* origin_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* ierr); +void mpi_rget_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank, + MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* request, int* ierr); +void mpi_rput_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank, + MPI_Aint* target_disp, int* target_count, int* target_datatype, int* win, int* request, int* ierr); +void mpi_fetch_and_op_( int *origin_addr, int* result_addr, int* datatype, int* target_rank, MPI_Aint* target_disp, int* op, int* win, int* ierr); +void mpi_compare_and_swap_( int *origin_addr, int* compare_addr, int* result_addr, + int* datatype, int* target_rank, MPI_Aint* target_disp, int* win, int* ierr); +void mpi_get_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr, + int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count, + int* target_datatype, int* op, int* win, int* ierr); +void mpi_rget_accumulate_(int *origin_addr, int* origin_count, int* origin_datatype, int* result_addr, + int* result_count, int* result_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count, + int* target_datatype, int* op, int* win, int* request, int* ierr); void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank, MPI_Aint* target_disp, int* target_count, int* target_datatype, int* op, int* win, int* ierr); +void mpi_raccumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int* target_rank, + MPI_Aint* target_disp, int* target_count, int* target_datatype, int* op, int* win, int* request, int* ierr); void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr); void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount, int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr); @@ -207,6 +249,7 @@ void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr); void mpi_op_create_ (void * function, int* commute, int* op, int* ierr); void mpi_op_free_ (int* op, int* ierr); +void mpi_op_commutative_ (int* op, int* commute, int* ierr); void mpi_group_free_ (int* group, int* ierr); void mpi_group_size_ (int* group, int *size, int* ierr); void mpi_group_rank_ (int* group, int *rank, int* ierr); @@ -275,6 +318,7 @@ void mpi_type_create_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, i void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) ; void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) ; +void mpi_type_create_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) ; void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype, int* ierr); void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) ; diff --git a/src/smpi/private.hpp b/src/smpi/include/private.hpp similarity index 100% rename from src/smpi/private.hpp rename to src/smpi/include/private.hpp diff --git a/src/smpi/smpi_coll.hpp b/src/smpi/include/smpi_coll.hpp similarity index 100% rename from 
src/smpi/smpi_coll.hpp rename to src/smpi/include/smpi_coll.hpp diff --git a/src/smpi/smpi_comm.hpp b/src/smpi/include/smpi_comm.hpp similarity index 95% rename from src/smpi/smpi_comm.hpp rename to src/smpi/include/smpi_comm.hpp index d31a50ad61..ed621f1a98 100644 --- a/src/smpi/smpi_comm.hpp +++ b/src/smpi/include/smpi_comm.hpp @@ -7,9 +7,9 @@ #define SMPI_COMM_HPP_INCLUDED #include -#include "src/smpi/smpi_keyvals.hpp" -#include "src/smpi/smpi_group.hpp" -#include "src/smpi/smpi_topo.hpp" +#include "smpi_keyvals.hpp" +#include "smpi_group.hpp" +#include "smpi_topo.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_datatype.hpp b/src/smpi/include/smpi_datatype.hpp similarity index 98% rename from src/smpi/smpi_datatype.hpp rename to src/smpi/include/smpi_datatype.hpp index 51978b5c71..3b1fd7dab9 100644 --- a/src/smpi/smpi_datatype.hpp +++ b/src/smpi/include/smpi_datatype.hpp @@ -6,8 +6,8 @@ #ifndef SMPI_DATATYPE_HPP #define SMPI_DATATYPE_HPP -#include "src/smpi/smpi_f2c.hpp" -#include "src/smpi/smpi_keyvals.hpp" +#include "smpi_f2c.hpp" +#include "smpi_keyvals.hpp" #define DT_FLAG_DESTROYED 0x0001 /**< user destroyed but some other layers still have a reference */ #define DT_FLAG_COMMITED 0x0002 /**< ready to be used for a send/recv operation */ diff --git a/src/smpi/smpi_datatype_derived.hpp b/src/smpi/include/smpi_datatype_derived.hpp similarity index 98% rename from src/smpi/smpi_datatype_derived.hpp rename to src/smpi/include/smpi_datatype_derived.hpp index 4ed9f738f5..b450bd9f98 100644 --- a/src/smpi/smpi_datatype_derived.hpp +++ b/src/smpi/include/smpi_datatype_derived.hpp @@ -7,7 +7,7 @@ #ifndef SMPI_DATATYPE_DERIVED_HPP #define SMPI_DATATYPE_DERIVED_HPP -#include "src/smpi/smpi_datatype.hpp" +#include "smpi_datatype.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_f2c.hpp b/src/smpi/include/smpi_f2c.hpp similarity index 100% rename from src/smpi/smpi_f2c.hpp rename to src/smpi/include/smpi_f2c.hpp diff --git a/src/smpi/smpi_group.hpp b/src/smpi/include/smpi_group.hpp similarity index 97% rename from src/smpi/smpi_group.hpp rename to src/smpi/include/smpi_group.hpp index 9c40f5e530..64812ca7e6 100644 --- a/src/smpi/smpi_group.hpp +++ b/src/smpi/include/smpi_group.hpp @@ -7,7 +7,7 @@ #ifndef SMPI_GROUP_HPP_INCLUDED #define SMPI_GROUP_HPP_INCLUDED -#include "src/smpi/smpi_f2c.hpp" +#include "smpi_f2c.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_info.hpp b/src/smpi/include/smpi_info.hpp similarity index 96% rename from src/smpi/smpi_info.hpp rename to src/smpi/include/smpi_info.hpp index 5eb096d34e..c8f049be73 100644 --- a/src/smpi/smpi_info.hpp +++ b/src/smpi/include/smpi_info.hpp @@ -7,7 +7,7 @@ #ifndef SMPI_INFO_HPP #define SMPI_INFO_HPP -#include "src/smpi/smpi_f2c.hpp" +#include "smpi_f2c.hpp" #include "smpi/smpi.h" #include "xbt/dict.h" diff --git a/src/smpi/smpi_keyvals.hpp b/src/smpi/include/smpi_keyvals.hpp similarity index 100% rename from src/smpi/smpi_keyvals.hpp rename to src/smpi/include/smpi_keyvals.hpp diff --git a/src/smpi/smpi_op.hpp b/src/smpi/include/smpi_op.hpp similarity index 95% rename from src/smpi/smpi_op.hpp rename to src/smpi/include/smpi_op.hpp index 167a346bac..f884189ebf 100644 --- a/src/smpi/smpi_op.hpp +++ b/src/smpi/include/smpi_op.hpp @@ -7,7 +7,7 @@ #ifndef SMPI_OP_HPP #define SMPI_OP_HPP -#include "src/smpi/smpi_info.hpp" +#include "smpi_info.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_process.hpp b/src/smpi/include/smpi_process.hpp similarity index 100% rename from 
src/smpi/smpi_process.hpp rename to src/smpi/include/smpi_process.hpp diff --git a/src/smpi/smpi_request.hpp b/src/smpi/include/smpi_request.hpp similarity index 99% rename from src/smpi/smpi_request.hpp rename to src/smpi/include/smpi_request.hpp index 535ebab307..5a11b2d009 100644 --- a/src/smpi/smpi_request.hpp +++ b/src/smpi/include/smpi_request.hpp @@ -7,7 +7,7 @@ #define SMPI_REQUEST_HPP_INCLUDED #include "smpi/smpi.h" -#include "src/smpi/smpi_f2c.hpp" +#include "smpi_f2c.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_status.hpp b/src/smpi/include/smpi_status.hpp similarity index 100% rename from src/smpi/smpi_status.hpp rename to src/smpi/include/smpi_status.hpp diff --git a/src/smpi/smpi_topo.hpp b/src/smpi/include/smpi_topo.hpp similarity index 95% rename from src/smpi/smpi_topo.hpp rename to src/smpi/include/smpi_topo.hpp index a884931201..d0cb5709ea 100644 --- a/src/smpi/smpi_topo.hpp +++ b/src/smpi/include/smpi_topo.hpp @@ -6,8 +6,8 @@ #ifndef SMPI_TOPO_HPP_INCLUDED #define SMPI_TOPO_HPP_INCLUDED -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_status.hpp" +#include "smpi_comm.hpp" +#include "smpi_status.hpp" typedef SMPI_Topology *MPI_Topology; diff --git a/src/smpi/smpi_win.hpp b/src/smpi/include/smpi_win.hpp similarity index 98% rename from src/smpi/smpi_win.hpp rename to src/smpi/include/smpi_win.hpp index e219be5588..ea49c1238f 100644 --- a/src/smpi/smpi_win.hpp +++ b/src/smpi/include/smpi_win.hpp @@ -7,7 +7,7 @@ #ifndef SMPI_WIN_HPP_INCLUDED #define SMPI_WIN_HPP_INCLUDED -#include "src/smpi/smpi_keyvals.hpp" +#include "smpi_keyvals.hpp" #include "xbt/synchro.h" #include diff --git a/src/smpi/SmpiHost.cpp b/src/smpi/internals/SmpiHost.cpp similarity index 99% rename from src/smpi/SmpiHost.cpp rename to src/smpi/internals/SmpiHost.cpp index cc9a814877..6f7139162d 100644 --- a/src/smpi/SmpiHost.cpp +++ b/src/smpi/internals/SmpiHost.cpp @@ -3,7 +3,7 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/SmpiHost.hpp" +#include "SmpiHost.hpp" #include "simgrid/s4u/VirtualMachine.hpp" #include "smpi/smpi_utils.hpp" diff --git a/src/smpi/instr_smpi.cpp b/src/smpi/internals/instr_smpi.cpp similarity index 100% rename from src/smpi/instr_smpi.cpp rename to src/smpi/internals/instr_smpi.cpp diff --git a/src/smpi/smpi_bench.cpp b/src/smpi/internals/smpi_bench.cpp similarity index 99% rename from src/smpi/smpi_bench.cpp rename to src/smpi/internals/smpi_bench.cpp index ce3502d56c..b570e7b1ea 100644 --- a/src/smpi/smpi_bench.cpp +++ b/src/smpi/internals/smpi_bench.cpp @@ -8,8 +8,8 @@ #include "private.hpp" #include "simgrid/modelchecker.h" #include "src/mc/mc_replay.h" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_comm.hpp" +#include "smpi_process.hpp" +#include "smpi_comm.hpp" #ifndef WIN32 #include diff --git a/src/smpi/smpi_deployment.cpp b/src/smpi/internals/smpi_deployment.cpp similarity index 97% rename from src/smpi/smpi_deployment.cpp rename to src/smpi/internals/smpi_deployment.cpp index 1abaa45278..3d88598f98 100644 --- a/src/smpi/smpi_deployment.cpp +++ b/src/smpi/internals/smpi_deployment.cpp @@ -5,9 +5,9 @@ * under the terms of the license (GNU LGPL) which comes with this package. 
*/ #include "simgrid/msg.h" /* barrier */ -#include "src/smpi/SmpiHost.hpp" -#include "src/smpi/private.h" -#include "src/smpi/smpi_comm.hpp" +#include "SmpiHost.hpp" +#include "private.h" +#include "smpi_comm.hpp" #include namespace simgrid { diff --git a/src/smpi/smpi_dvfs.cpp b/src/smpi/internals/smpi_dvfs.cpp similarity index 100% rename from src/smpi/smpi_dvfs.cpp rename to src/smpi/internals/smpi_dvfs.cpp diff --git a/src/smpi/smpi_global.cpp b/src/smpi/internals/smpi_global.cpp similarity index 98% rename from src/smpi/smpi_global.cpp rename to src/smpi/internals/smpi_global.cpp index afa8e584d0..1504bd2d35 100644 --- a/src/smpi/smpi_global.cpp +++ b/src/smpi/internals/smpi_global.cpp @@ -9,15 +9,15 @@ #include "src/msg/msg_private.h" #include "src/simix/smx_private.h" #include "src/surf/surf_interface.hpp" -#include "src/smpi/SmpiHost.hpp" +#include "SmpiHost.hpp" #include "xbt/config.hpp" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_group.hpp" -#include "src/smpi/smpi_info.hpp" -#include "src/smpi/smpi_process.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_coll.hpp" +#include "smpi_comm.hpp" +#include "smpi_group.hpp" +#include "smpi_info.hpp" +#include "smpi_process.hpp" #include #include diff --git a/src/smpi/smpi_memory.cpp b/src/smpi/internals/smpi_memory.cpp similarity index 99% rename from src/smpi/smpi_memory.cpp rename to src/smpi/internals/smpi_memory.cpp index e071be307e..719b28b63b 100644 --- a/src/smpi/smpi_memory.cpp +++ b/src/smpi/internals/smpi_memory.cpp @@ -23,8 +23,8 @@ #include "src/xbt/memory_map.hpp" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" +#include "private.h" +#include "private.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_memory, smpi, "Memory layout support for SMPI"); diff --git a/src/smpi/smpi_process.cpp b/src/smpi/internals/smpi_process.cpp similarity index 97% rename from src/smpi/smpi_process.cpp rename to src/smpi/internals/smpi_process.cpp index a7da2c8d9d..451f3cf9d8 100644 --- a/src/smpi/smpi_process.cpp +++ b/src/smpi/internals/smpi_process.cpp @@ -7,11 +7,11 @@ #include "src/mc/mc_replay.h" #include "src/msg/msg_private.h" #include "src/simix/smx_private.h" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_group.hpp" -#include "src/smpi/smpi_comm.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_process.hpp" +#include "smpi_group.hpp" +#include "smpi_comm.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)"); diff --git a/src/smpi/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp similarity index 99% rename from src/smpi/smpi_replay.cpp rename to src/smpi/internals/smpi_replay.cpp index 9ba7295206..c41904ef8b 100644 --- a/src/smpi/smpi_replay.cpp +++ b/src/smpi/internals/smpi_replay.cpp @@ -3,13 +3,13 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. 
*/ -#include "src/smpi/private.h" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_group.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_request.hpp" +#include "private.h" +#include "smpi_coll.hpp" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_group.hpp" +#include "smpi_process.hpp" +#include "smpi_request.hpp" #include "xbt/replay.hpp" #include diff --git a/src/smpi/smpi_shared.cpp b/src/smpi/internals/smpi_shared.cpp similarity index 100% rename from src/smpi/smpi_shared.cpp rename to src/smpi/internals/smpi_shared.cpp diff --git a/src/smpi/smpi_static_variables.cpp b/src/smpi/internals/smpi_static_variables.cpp similarity index 100% rename from src/smpi/smpi_static_variables.cpp rename to src/smpi/internals/smpi_static_variables.cpp diff --git a/src/smpi/smpi_utils.cpp b/src/smpi/internals/smpi_utils.cpp similarity index 100% rename from src/smpi/smpi_utils.cpp rename to src/smpi/internals/smpi_utils.cpp diff --git a/src/smpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp similarity index 97% rename from src/smpi/smpi_comm.cpp rename to src/smpi/mpi/smpi_comm.cpp index 23de66f493..ee0747f8d6 100644 --- a/src/smpi/smpi_comm.cpp +++ b/src/smpi/mpi/smpi_comm.cpp @@ -7,15 +7,15 @@ #include #include "src/simix/smx_private.h" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_request.hpp" -#include "src/smpi/smpi_status.hpp" -#include "src/smpi/smpi_win.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_comm.hpp" +#include "smpi_coll.hpp" +#include "smpi_datatype.hpp" +#include "smpi_process.hpp" +#include "smpi_request.hpp" +#include "smpi_status.hpp" +#include "smpi_win.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)"); diff --git a/src/smpi/smpi_datatype.cpp b/src/smpi/mpi/smpi_datatype.cpp similarity index 98% rename from src/smpi/smpi_datatype.cpp rename to src/smpi/mpi/smpi_datatype.cpp index de74190462..565ec99448 100644 --- a/src/smpi/smpi_datatype.cpp +++ b/src/smpi/mpi/smpi_datatype.cpp @@ -5,11 +5,11 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "simgrid/modelchecker.h" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_datatype_derived.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_process.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_datatype_derived.hpp" +#include "smpi_op.hpp" +#include "smpi_process.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI (datatype)"); diff --git a/src/smpi/smpi_datatype_derived.cpp b/src/smpi/mpi/smpi_datatype_derived.cpp similarity index 99% rename from src/smpi/smpi_datatype_derived.cpp rename to src/smpi/mpi/smpi_datatype_derived.cpp index 4551048a74..ba77e82eef 100644 --- a/src/smpi/smpi_datatype_derived.cpp +++ b/src/smpi/mpi/smpi_datatype_derived.cpp @@ -4,8 +4,8 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. 
*/ -#include "src/smpi/smpi_datatype_derived.hpp" -#include "src/smpi/smpi_op.hpp" +#include "smpi_datatype_derived.hpp" +#include "smpi_op.hpp" XBT_LOG_EXTERNAL_CATEGORY(smpi_datatype); diff --git a/src/smpi/smpi_f2c.cpp b/src/smpi/mpi/smpi_f2c.cpp similarity index 94% rename from src/smpi/smpi_f2c.cpp rename to src/smpi/mpi/smpi_f2c.cpp index df584b4f5e..e5a5706bcc 100644 --- a/src/smpi/smpi_f2c.cpp +++ b/src/smpi/mpi/smpi_f2c.cpp @@ -3,9 +3,9 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/private.h" -#include "src/smpi/smpi_f2c.hpp" -#include "src/smpi/smpi_process.hpp" +#include "private.h" +#include "smpi_f2c.hpp" +#include "smpi_process.hpp" #include diff --git a/src/smpi/smpi_group.cpp b/src/smpi/mpi/smpi_group.cpp similarity index 99% rename from src/smpi/smpi_group.cpp rename to src/smpi/mpi/smpi_group.cpp index a7e56babcb..115cf49a17 100644 --- a/src/smpi/smpi_group.cpp +++ b/src/smpi/mpi/smpi_group.cpp @@ -3,8 +3,8 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_group.hpp" +#include "smpi_comm.hpp" +#include "smpi_group.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_group, smpi, "Logging specific to SMPI (group)"); diff --git a/src/smpi/smpi_info.cpp b/src/smpi/mpi/smpi_info.cpp similarity index 98% rename from src/smpi/smpi_info.cpp rename to src/smpi/mpi/smpi_info.cpp index 584ab1f08d..093da595f1 100644 --- a/src/smpi/smpi_info.cpp +++ b/src/smpi/mpi/smpi_info.cpp @@ -4,7 +4,7 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/smpi_info.hpp" +#include "smpi_info.hpp" #include "xbt/ex.hpp" #include "xbt/sysdep.h" diff --git a/src/smpi/smpi_keyvals.cpp b/src/smpi/mpi/smpi_keyvals.cpp similarity index 97% rename from src/smpi/smpi_keyvals.cpp rename to src/smpi/mpi/smpi_keyvals.cpp index 25aafe45d3..559ef5c3c9 100644 --- a/src/smpi/smpi_keyvals.cpp +++ b/src/smpi/mpi/smpi_keyvals.cpp @@ -5,7 +5,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ //#include "private.h" -#include "src/smpi/smpi_keyvals.hpp" +#include "smpi_keyvals.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_op.cpp b/src/smpi/mpi/smpi_op.cpp similarity index 97% rename from src/smpi/smpi_op.cpp rename to src/smpi/mpi/smpi_op.cpp index 4f52efedcd..67d6ae8a1c 100644 --- a/src/smpi/smpi_op.cpp +++ b/src/smpi/mpi/smpi_op.cpp @@ -3,11 +3,11 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. 
*/ -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_process.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_datatype.hpp" +#include "smpi_op.hpp" +#include "smpi_process.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_op, smpi, "Logging specific to SMPI (op)"); diff --git a/src/smpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp similarity index 99% rename from src/smpi/smpi_request.cpp rename to src/smpi/mpi/smpi_request.cpp index 19c5497096..a4c0a36e11 100644 --- a/src/smpi/smpi_request.cpp +++ b/src/smpi/mpi/smpi_request.cpp @@ -3,18 +3,18 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "src/smpi/smpi_request.hpp" +#include "smpi_request.hpp" #include "mc/mc.h" #include "src/kernel/activity/CommImpl.hpp" #include "src/mc/mc_replay.h" -#include "src/smpi/SmpiHost.hpp" -#include "src/smpi/private.h" -#include "src/smpi/private.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_process.hpp" +#include "SmpiHost.hpp" +#include "private.h" +#include "private.hpp" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_op.hpp" +#include "smpi_process.hpp" #include diff --git a/src/smpi/smpi_status.cpp b/src/smpi/mpi/smpi_status.cpp similarity index 89% rename from src/smpi/smpi_status.cpp rename to src/smpi/mpi/smpi_status.cpp index 22c84bbf2f..575046580d 100644 --- a/src/smpi/smpi_status.cpp +++ b/src/smpi/mpi/smpi_status.cpp @@ -5,8 +5,8 @@ #include "private.h" #include "src/simix/smx_private.h" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_status.hpp" +#include "smpi_datatype.hpp" +#include "smpi_status.hpp" namespace simgrid{ namespace smpi{ diff --git a/src/smpi/smpi_topo.cpp b/src/smpi/mpi/smpi_topo.cpp similarity index 99% rename from src/smpi/smpi_topo.cpp rename to src/smpi/mpi/smpi_topo.cpp index 432174ed2b..fa3701d3b8 100644 --- a/src/smpi/smpi_topo.cpp +++ b/src/smpi/mpi/smpi_topo.cpp @@ -8,8 +8,8 @@ #include "private.h" #include #include -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_topo.hpp" +#include "smpi_comm.hpp" +#include "smpi_topo.hpp" /* static functions */ static int assignnodes(int ndim, int nfactor, int *pfacts,int **pdims); diff --git a/src/smpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp similarity index 98% rename from src/smpi/smpi_win.cpp rename to src/smpi/mpi/smpi_win.cpp index 5d83d8cd41..f2782d153e 100644 --- a/src/smpi/smpi_win.cpp +++ b/src/smpi/mpi/smpi_win.cpp @@ -3,15 +3,15 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. 
*/ -#include "src/smpi/private.h" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_info.hpp" -#include "src/smpi/smpi_keyvals.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_request.hpp" -#include "src/smpi/smpi_win.hpp" +#include "private.h" +#include "smpi_coll.hpp" +#include "smpi_comm.hpp" +#include "smpi_datatype.hpp" +#include "smpi_info.hpp" +#include "smpi_keyvals.hpp" +#include "smpi_process.hpp" +#include "smpi_request.hpp" +#include "smpi_win.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)"); diff --git a/src/smpi/smpi_f77.cpp b/src/smpi/smpi_f77.cpp deleted file mode 100644 index ffaad7c8f1..0000000000 --- a/src/smpi/smpi_f77.cpp +++ /dev/null @@ -1,1602 +0,0 @@ -/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */ - -/* This program is free software; you can redistribute it and/or modify it - * under the terms of the license (GNU LGPL) which comes with this package. */ - -#include "src/smpi/private.h" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_datatype.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_request.hpp" -#include "src/smpi/smpi_win.hpp" - -static int running_processes = 0; - -#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__) -typedef int integer; -#else -typedef long int integer; -#endif - -/* Convert between Fortran and C */ - -#define FORT_BOTTOM(addr) ((*(int*)addr) == -200 ? MPI_BOTTOM : (void*)addr) -#define FORT_IN_PLACE(addr) ((*(int*)addr) == -100 ? MPI_IN_PLACE : (void*)addr) -#define FORT_STATUS_IGNORE(addr) (static_cast((*(int*)addr) == -300 ? MPI_STATUS_IGNORE : (void*)addr)) -#define FORT_STATUSES_IGNORE(addr) (static_cast((*(int*)addr) == -400 ? 
MPI_STATUSES_IGNORE : (void*)addr)) - -#define KEY_SIZE (sizeof(int) * 2 + 1) - -static char* get_key(char* key, int id) { - snprintf(key, KEY_SIZE, "%x",id); - return key; -} - -static char* get_key_id(char* key, int id) { - snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process()->index()); - return key; -} - -static void smpi_init_fortran_types(){ - if(simgrid::smpi::F2C::lookup() == nullptr){ - MPI_COMM_WORLD->add_f(); - MPI_BYTE->add_f();//MPI_BYTE - MPI_CHAR->add_f();//MPI_CHARACTER -#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__) - MPI_C_BOOL->add_f();//MPI_LOGICAL - MPI_INT->add_f();//MPI_INTEGER -#else - MPI_C_BOOL->add_f();//MPI_LOGICAL - MPI_LONG->add_f();//MPI_INTEGER -#endif - MPI_INT8_T->add_f();//MPI_INTEGER1 - MPI_INT16_T->add_f();//MPI_INTEGER2 - MPI_INT32_T->add_f();//MPI_INTEGER4 - MPI_INT64_T->add_f();//MPI_INTEGER8 - MPI_REAL->add_f();//MPI_REAL - MPI_REAL4->add_f();//MPI_REAL4 - MPI_REAL8->add_f();//MPI_REAL8 - MPI_DOUBLE->add_f();//MPI_DOUBLE_PRECISION - MPI_C_FLOAT_COMPLEX->add_f();//MPI_COMPLEX - MPI_C_DOUBLE_COMPLEX->add_f();//MPI_DOUBLE_COMPLEX -#if defined(__alpha__) || defined(__sparc64__) || defined(__x86_64__) || defined(__ia64__) - MPI_2INT->add_f();//MPI_2INTEGER -#else - MPI_2LONG->add_f();//MPI_2INTEGER -#endif - MPI_UINT8_T->add_f();//MPI_LOGICAL1 - MPI_UINT16_T->add_f();//MPI_LOGICAL2 - MPI_UINT32_T->add_f();//MPI_LOGICAL4 - MPI_UINT64_T->add_f();//MPI_LOGICAL8 - MPI_2FLOAT->add_f();//MPI_2REAL - MPI_2DOUBLE->add_f();//MPI_2DOUBLE_PRECISION - MPI_PTR->add_f();//MPI_AINT - MPI_OFFSET->add_f();//MPI_OFFSET - MPI_AINT->add_f();//MPI_COUNT - MPI_REAL16->add_f();//MPI_REAL16 - MPI_PACKED->add_f();//MPI_PACKED - - MPI_MAX->add_f(); - MPI_MIN->add_f(); - MPI_MAXLOC->add_f(); - MPI_MINLOC->add_f(); - MPI_SUM->add_f(); - MPI_PROD->add_f(); - MPI_LAND->add_f(); - MPI_LOR->add_f(); - MPI_LXOR->add_f(); - MPI_BAND->add_f(); - MPI_BOR->add_f(); - MPI_BXOR->add_f(); - } -} - -extern "C" { // This should really use the C linkage to be usable from Fortran - - -void mpi_init_(int* ierr) { - smpi_init_fortran_types(); - *ierr = MPI_Init(nullptr, nullptr); - running_processes++; -} - -void mpi_finalize_(int* ierr) { - *ierr = MPI_Finalize(); - running_processes--; - if(running_processes==0){ - simgrid::smpi::F2C::delete_lookup(); - } -} - -void mpi_abort_(int* comm, int* errorcode, int* ierr) { - *ierr = MPI_Abort(simgrid::smpi::Comm::f2c(*comm), *errorcode); -} - -void mpi_comm_rank_(int* comm, int* rank, int* ierr) { - *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank); -} - -void mpi_comm_size_(int* comm, int* size, int* ierr) { - *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size); -} - -double mpi_wtime_() { - return MPI_Wtime(); -} - -double mpi_wtick_() { - return MPI_Wtick(); -} - -void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) { - MPI_Comm tmp; - - *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) { - MPI_Comm tmp; - - *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_comm_free_(int* comm, int* ierr) { - MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm); - - *ierr = MPI_Comm_free(&tmp); - - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::Comm::free_f(*comm); - } -} - -void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* 
ierr) { - MPI_Comm tmp; - - *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_out = tmp->add_f(); - } -} - -void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) { - MPI_Group tmp; - - *ierr = MPI_Group_incl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp); - if(*ierr == MPI_SUCCESS) { - *group_out = tmp->add_f(); - } -} - -void mpi_comm_group_(int* comm, int* group_out, int* ierr) { - MPI_Group tmp; - - *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *group_out = tmp->c2f(); - } -} - -void mpi_initialized_(int* flag, int* ierr){ - *ierr = MPI_Initialized(flag); -} - -void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { - MPI_Request req; - buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); - if(*ierr == MPI_SUCCESS) { - *request = req->add_f(); - } -} - -void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { - MPI_Request req; - buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); - if(*ierr == MPI_SUCCESS) { - *request = req->add_f(); - } -} - -void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { - MPI_Request req; - buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); - if(*ierr == MPI_SUCCESS) { - *request = req->add_f(); - } -} - -void mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) { - buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) { - buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount, - int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) { - sendbuf = static_cast( FORT_BOTTOM(sendbuf)); - recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount, - simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); -} - -void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) { - MPI_Request req; - buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req); - if(*ierr == MPI_SUCCESS) { - *request = req->add_f(); - } -} - -void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) { - MPI_Request req; - buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req); - 
if(*ierr == MPI_SUCCESS) { - *request = req->add_f(); - } -} - -void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) { - buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), status); -} - -void mpi_start_(int* request, int* ierr) { - MPI_Request req = simgrid::smpi::Request::f2c(*request); - - *ierr = MPI_Start(&req); -} - -void mpi_startall_(int* count, int* requests, int* ierr) { - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *count); - for(i = 0; i < *count; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr = MPI_Startall(*count, reqs); - xbt_free(reqs); -} - -void mpi_wait_(int* request, MPI_Status* status, int* ierr) { - MPI_Request req = simgrid::smpi::Request::f2c(*request); - - *ierr = MPI_Wait(&req, FORT_STATUS_IGNORE(status)); - if(req==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(*request); - *request=MPI_FORTRAN_REQUEST_NULL; - } -} - -void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) { - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *count); - for(i = 0; i < *count; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr = MPI_Waitany(*count, reqs, index, status); - if(reqs[*index]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[*index]); - requests[*index]=MPI_FORTRAN_REQUEST_NULL; - } - xbt_free(reqs); -} - -void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) { - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *count); - for(i = 0; i < *count; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status)); - for(i = 0; i < *count; i++) { - if(reqs[i]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[i]); - requests[i]=MPI_FORTRAN_REQUEST_NULL; - } - } - - xbt_free(reqs); -} - -void mpi_barrier_(int* comm, int* ierr) { - *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) { - *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - sendbuf = static_cast( FORT_BOTTOM(sendbuf)); - recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype), - simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, - int* root, 
int* comm, int* ierr) { - recvbuf = static_cast( FORT_IN_PLACE(recvbuf)); - *ierr = MPI_Scatter(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype, - void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) { - recvbuf = static_cast( FORT_IN_PLACE(recvbuf)); - *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, - int* root, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE; - recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype, - void* recvbuf, int* recvcounts, int* displs, int* recvtype, int* root, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE; - recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, - int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype, - void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) { - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) { - *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), - simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype, - void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) { - *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype, - void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) { - *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype), - recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_test_ (int * request, int *flag, 
MPI_Status * status, int* ierr){ - MPI_Request req = simgrid::smpi::Request::f2c(*request); - *ierr= MPI_Test(&req, flag, FORT_STATUS_IGNORE(status)); - if(req==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(*request); - *request=MPI_FORTRAN_REQUEST_NULL; - } -} - -void mpi_testall_ (int* count, int * requests, int *flag, MPI_Status * statuses, int* ierr){ - int i; - MPI_Request* reqs = xbt_new(MPI_Request, *count); - for(i = 0; i < *count; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr= MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses)); - for(i = 0; i < *count; i++) { - if(reqs[i]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[i]); - requests[i]=MPI_FORTRAN_REQUEST_NULL; - } - } - xbt_free(reqs); -} - -void mpi_get_processor_name_(char *name, int *resultlen, int* ierr){ - *ierr = MPI_Get_processor_name(name, resultlen); -} - -void mpi_get_count_(MPI_Status * status, int* datatype, int *count, int* ierr){ - *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), simgrid::smpi::Datatype::f2c(*datatype), count); -} - -void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr ){ - *ierr = MPI_Attr_get(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value, flag); -} - -void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){ - *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent); -} - -void mpi_type_commit_(int* datatype, int* ierr){ - MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype); - *ierr= MPI_Type_commit(&tmp); -} - -void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){ - MPI_Datatype tmp; - *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){ - MPI_Datatype tmp; - *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){ - MPI_Datatype tmp; - *ierr= MPI_Type_hvector (*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){ - MPI_Datatype tmp; - *ierr= MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_free_(int* datatype, int* ierr){ - MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype); - *ierr= MPI_Type_free (&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::F2C::free_f(*datatype); - } -} - -void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){ - *ierr= MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp); -} - -void mpi_type_lb_(int* datatype, MPI_Aint * extent, int* ierr){ - *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent); -} - -void mpi_type_size_(int* datatype, int *size, int* ierr) -{ - *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size); -} - -void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){ - *ierr = MPI_Error_string(*errorcode, string, resultlen); -} - -void mpi_win_fence_( 
int* assert, int* win, int* ierr){ - *ierr = MPI_Win_fence(* assert, simgrid::smpi::Win::f2c(*win)); -} - -void mpi_win_free_( int* win, int* ierr){ - MPI_Win tmp = simgrid::smpi::Win::f2c(*win); - *ierr = MPI_Win_free(&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::F2C::free_f(*win); - } -} - -void mpi_win_create_( int *base, MPI_Aint* size, int* disp_unit, int* info, int* comm, int *win, int* ierr){ - MPI_Win tmp; - *ierr = MPI_Win_create( static_cast(base), *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp); - if(*ierr == MPI_SUCCESS) { - *win = tmp->add_f(); - } -} - -void mpi_win_post_(int* group, int assert, int* win, int* ierr){ - *ierr = MPI_Win_post(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win)); -} - -void mpi_win_start_(int* group, int assert, int* win, int* ierr){ - *ierr = MPI_Win_start(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win)); -} - -void mpi_win_complete_(int* win, int* ierr){ - *ierr = MPI_Win_complete(simgrid::smpi::Win::f2c(*win)); -} - -void mpi_win_wait_(int* win, int* ierr){ - *ierr = MPI_Win_wait(simgrid::smpi::Win::f2c(*win)); -} - -void mpi_win_set_name_ (int* win, char * name, int* ierr, int size){ - //handle trailing blanks - while(name[size-1]==' ') - size--; - while(*name==' '){//handle leading blanks - size--; - name++; - } - char* tname = xbt_new(char,size+1); - strncpy(tname, name, size); - tname[size]='\0'; - *ierr = MPI_Win_set_name(simgrid::smpi::Win::f2c(*win), tname); - xbt_free(tname); -} - -void mpi_win_get_name_ (int* win, char * name, int* len, int* ierr){ - *ierr = MPI_Win_get_name(simgrid::smpi::Win::f2c(*win),name,len); - if(*len>0) - name[*len]=' ';//blank padding, not \0 -} - -void mpi_info_create_( int *info, int* ierr){ - MPI_Info tmp; - *ierr = MPI_Info_create(&tmp); - if(*ierr == MPI_SUCCESS) { - *info = tmp->add_f(); - } -} - -void mpi_info_set_( int *info, char *key, char *value, int* ierr, unsigned int keylen, unsigned int valuelen){ - //handle trailing blanks - while(key[keylen-1]==' ') - keylen--; - while(*key==' '){//handle leading blanks - keylen--; - key++; - } - char* tkey = xbt_new(char,keylen+1); - strncpy(tkey, key, keylen); - tkey[keylen]='\0'; - - while(value[valuelen-1]==' ') - valuelen--; - while(*value==' '){//handle leading blanks - valuelen--; - value++; - } - char* tvalue = xbt_new(char,valuelen+1); - strncpy(tvalue, value, valuelen); - tvalue[valuelen]='\0'; - - *ierr = MPI_Info_set( simgrid::smpi::Info::f2c(*info), tkey, tvalue); - xbt_free(tkey); - xbt_free(tvalue); -} - -void mpi_info_get_ (int* info,char *key,int* valuelen, char *value, int *flag, int* ierr, unsigned int keylen ){ - while(key[keylen-1]==' ') - keylen--; - while(*key==' '){//handle leading blanks - keylen--; - key++; - } - char* tkey = xbt_new(char,keylen+1); - strncpy(tkey, key, keylen); - tkey[keylen]='\0'; - *ierr = MPI_Info_get(simgrid::smpi::Info::f2c(*info),tkey,*valuelen, value, flag); - xbt_free(tkey); - if(*flag!=0){ - int replace=0; - int i=0; - for (i=0; i<*valuelen; i++){ - if(value[i]=='\0') - replace=1; - if(replace) - value[i]=' '; - } - } -} - -void mpi_info_free_(int* info, int* ierr){ - MPI_Info tmp = simgrid::smpi::Info::f2c(*info); - *ierr = MPI_Info_free(&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::F2C::free_f(*info); - } -} - -void mpi_get_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, - MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){ - *ierr = 
MPI_Get( static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win)); -} - -void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, - MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* ierr){ - *ierr = MPI_Accumulate( static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win)); -} - -void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, - MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){ - *ierr = MPI_Put( static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win)); -} - -//following are automatically generated, and have to be checked -void mpi_finalized_ (int * flag, int* ierr){ - - *ierr = MPI_Finalized(flag); -} - -void mpi_init_thread_ (int* required, int *provided, int* ierr){ - smpi_init_fortran_types(); - *ierr = MPI_Init_thread(nullptr, nullptr,*required, provided); - running_processes++; -} - -void mpi_query_thread_ (int *provided, int* ierr){ - - *ierr = MPI_Query_thread(provided); -} - -void mpi_is_thread_main_ (int *flag, int* ierr){ - - *ierr = MPI_Is_thread_main(flag); -} - -void mpi_address_ (void *location, MPI_Aint * address, int* ierr){ - - *ierr = MPI_Address(location, address); -} - -void mpi_get_address_ (void *location, MPI_Aint * address, int* ierr){ - - *ierr = MPI_Get_address(location, address); -} - -void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp); - if(*ierr == MPI_SUCCESS) { - *newdatatype = tmp->add_f(); - } -} - -void mpi_type_set_name_ (int* datatype, char * name, int* ierr, int size){ - char* tname = xbt_new(char, size+1); - strncpy(tname, name, size); - tname[size]='\0'; - *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname); - xbt_free(tname); -} - -void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){ - *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype),name,len); - if(*len>0) - name[*len]=' '; -} - -void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){ - - *ierr = MPI_Type_get_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val,flag); -} - -void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){ - - *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val); -} - -void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){ - - *ierr = MPI_Type_delete_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval); -} - -void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){ - - *ierr = MPI_Type_create_keyval(reinterpret_cast(copy_fn), reinterpret_cast(delete_fn), - keyval, extra_state) ; -} - -void mpi_type_free_keyval_ (int* keyval, int* ierr) { - *ierr = MPI_Type_free_keyval( keyval); -} - -void mpi_pcontrol_ (int* level , int* ierr){ - *ierr = MPI_Pcontrol(*static_cast(level)); -} 
- -void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){ - - *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent); -} - -void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){ - - *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent); -} - -void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){ - MPI_Op tmp; - *ierr = MPI_Op_create(reinterpret_cast(function),*commute, &tmp); - if(*ierr == MPI_SUCCESS) { - tmp->set_fortran_op(); - *op = tmp->add_f(); - } -} - -void mpi_op_free_ (int* op, int* ierr){ - MPI_Op tmp= simgrid::smpi::Op::f2c(*op); - *ierr = MPI_Op_free(& tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::F2C::free_f(*op); - } -} - -void mpi_group_free_ (int* group, int* ierr){ - MPI_Group tmp = simgrid::smpi::Group::f2c(*group); - *ierr = MPI_Group_free(&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::F2C::free_f(*group); - } -} - -void mpi_group_size_ (int* group, int *size, int* ierr){ - - *ierr = MPI_Group_size(simgrid::smpi::Group::f2c(*group), size); -} - -void mpi_group_rank_ (int* group, int *rank, int* ierr){ - - *ierr = MPI_Group_rank(simgrid::smpi::Group::f2c(*group), rank); -} - -void mpi_group_translate_ranks_ (int* group1, int* n, int *ranks1, int* group2, int *ranks2, int* ierr) -{ - - *ierr = MPI_Group_translate_ranks(simgrid::smpi::Group::f2c(*group1), *n, ranks1, simgrid::smpi::Group::f2c(*group2), ranks2); -} - -void mpi_group_compare_ (int* group1, int* group2, int *result, int* ierr){ - - *ierr = MPI_Group_compare(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), result); -} - -void mpi_group_union_ (int* group1, int* group2, int* newgroup, int* ierr){ - MPI_Group tmp; - *ierr = MPI_Group_union(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_group_intersection_ (int* group1, int* group2, int* newgroup, int* ierr){ - MPI_Group tmp; - *ierr = MPI_Group_intersection(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_group_difference_ (int* group1, int* group2, int* newgroup, int* ierr){ - MPI_Group tmp; - *ierr = MPI_Group_difference(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_group_excl_ (int* group, int* n, int *ranks, int* newgroup, int* ierr){ - MPI_Group tmp; - *ierr = MPI_Group_excl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_group_range_incl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr) -{ - MPI_Group tmp; - *ierr = MPI_Group_range_incl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_group_range_excl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr) -{ - MPI_Group tmp; - *ierr = MPI_Group_range_excl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp); - if(*ierr == MPI_SUCCESS) { - *newgroup = tmp->add_f(); - } -} - -void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){ - - *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, flag); -} - -void mpi_comm_set_attr_ (int* comm, int* 
comm_keyval, void *attribute_val, int* ierr){ - - *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val); -} - -void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){ - - *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval); -} - -void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){ - - *ierr = MPI_Comm_create_keyval(reinterpret_cast(copy_fn), reinterpret_cast(delete_fn), - keyval, extra_state) ; -} - -void mpi_comm_free_keyval_ (int* keyval, int* ierr) { - *ierr = MPI_Comm_free_keyval( keyval); -} - -void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){ - *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len); - if(*len>0) - name[*len]=' '; -} - -void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){ - - *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result); -} - -void mpi_comm_disconnect_ (int* comm, int* ierr){ - MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm); - *ierr = MPI_Comm_disconnect(&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::Comm::free_f(*comm); - } -} - -void mpi_request_free_ (int* request, int* ierr){ - MPI_Request tmp=simgrid::smpi::Request::f2c(*request); - *ierr = MPI_Request_free(&tmp); - if(*ierr == MPI_SUCCESS) { - simgrid::smpi::Request::free_f(*request); - } -} - -void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag, - int* comm, MPI_Status* status, int* ierr) -{ - *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src, - *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); -} - -void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr) -{ - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *count); - for(i = 0; i < *count; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status)); - if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[*index]); - requests[*index]=MPI_FORTRAN_REQUEST_NULL; - } - xbt_free(reqs); -} - -void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MPI_Status* status, int* ierr) -{ - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *incount); - for(i = 0; i < *incount; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - } - *ierr = MPI_Waitsome(*incount, reqs, outcount, indices, status); - for(i=0;i<*outcount;i++){ - if(reqs[indices[i]]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[indices[i]]); - requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL; - } - } - xbt_free(reqs); -} - -void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* count, int* datatype, int* op, int* ierr){ - - *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op)); -} - -void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm, - int* ierr) -{ - sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), - simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_pack_size_ (int* incount, int* datatype, int* comm, int* size, int* ierr) { - *ierr = 
MPI_Pack_size(*incount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Comm::f2c(*comm), size); -} - -void mpi_cart_coords_ (int* comm, int* rank, int* maxdims, int* coords, int* ierr) { - *ierr = MPI_Cart_coords(simgrid::smpi::Comm::f2c(*comm), *rank, *maxdims, coords); -} - -void mpi_cart_create_ (int* comm_old, int* ndims, int* dims, int* periods, int* reorder, int* comm_cart, int* ierr) { - MPI_Comm tmp; - *ierr = MPI_Cart_create(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_cart = tmp->add_f(); - } -} - -void mpi_cart_get_ (int* comm, int* maxdims, int* dims, int* periods, int* coords, int* ierr) { - *ierr = MPI_Cart_get(simgrid::smpi::Comm::f2c(*comm), *maxdims, dims, periods, coords); -} - -void mpi_cart_map_ (int* comm_old, int* ndims, int* dims, int* periods, int* newrank, int* ierr) { - *ierr = MPI_Cart_map(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, newrank); -} - -void mpi_cart_rank_ (int* comm, int* coords, int* rank, int* ierr) { - *ierr = MPI_Cart_rank(simgrid::smpi::Comm::f2c(*comm), coords, rank); -} - -void mpi_cart_shift_ (int* comm, int* direction, int* displ, int* source, int* dest, int* ierr) { - *ierr = MPI_Cart_shift(simgrid::smpi::Comm::f2c(*comm), *direction, *displ, source, dest); -} - -void mpi_cart_sub_ (int* comm, int* remain_dims, int* comm_new, int* ierr) { - MPI_Comm tmp; - *ierr = MPI_Cart_sub(simgrid::smpi::Comm::f2c(*comm), remain_dims, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_new = tmp->add_f(); - } -} - -void mpi_cartdim_get_ (int* comm, int* ndims, int* ierr) { - *ierr = MPI_Cartdim_get(simgrid::smpi::Comm::f2c(*comm), ndims); -} - -void mpi_graph_create_ (int* comm_old, int* nnodes, int* index, int* edges, int* reorder, int* comm_graph, int* ierr) { - MPI_Comm tmp; - *ierr = MPI_Graph_create(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_graph = tmp->add_f(); - } -} - -void mpi_graph_get_ (int* comm, int* maxindex, int* maxedges, int* index, int* edges, int* ierr) { - *ierr = MPI_Graph_get(simgrid::smpi::Comm::f2c(*comm), *maxindex, *maxedges, index, edges); -} - -void mpi_graph_map_ (int* comm_old, int* nnodes, int* index, int* edges, int* newrank, int* ierr) { - *ierr = MPI_Graph_map(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, newrank); -} - -void mpi_graph_neighbors_ (int* comm, int* rank, int* maxneighbors, int* neighbors, int* ierr) { - *ierr = MPI_Graph_neighbors(simgrid::smpi::Comm::f2c(*comm), *rank, *maxneighbors, neighbors); -} - -void mpi_graph_neighbors_count_ (int* comm, int* rank, int* nneighbors, int* ierr) { - *ierr = MPI_Graph_neighbors_count(simgrid::smpi::Comm::f2c(*comm), *rank, nneighbors); -} - -void mpi_graphdims_get_ (int* comm, int* nnodes, int* nedges, int* ierr) { - *ierr = MPI_Graphdims_get(simgrid::smpi::Comm::f2c(*comm), nnodes, nedges); -} - -void mpi_topo_test_ (int* comm, int* top_type, int* ierr) { - *ierr = MPI_Topo_test(simgrid::smpi::Comm::f2c(*comm), top_type); -} - -void mpi_error_class_ (int* errorcode, int* errorclass, int* ierr) { - *ierr = MPI_Error_class(*errorcode, errorclass); -} - -void mpi_errhandler_create_ (void* function, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_create(reinterpret_cast(function), static_cast(errhandler)); -} - -void mpi_errhandler_free_ (void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_free(static_cast(errhandler)); -} - -void mpi_errhandler_get_ (int* comm, void* errhandler, int* ierr) 
{ - *ierr = MPI_Errhandler_get(simgrid::smpi::Comm::f2c(*comm), static_cast(errhandler)); -} - -void mpi_errhandler_set_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast(errhandler)); -} - -void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast(errhandler)); -} - -void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), static_cast(errhandler)); -} - -void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) { - MPI_Datatype tmp; - *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_cancel_ (int* request, int* ierr) { - MPI_Request tmp=simgrid::smpi::Request::f2c(*request); - *ierr = MPI_Cancel(&tmp); -} - -void mpi_buffer_attach_ (void* buffer, int* size, int* ierr) { - *ierr = MPI_Buffer_attach(buffer, *size); -} - -void mpi_buffer_detach_ (void* buffer, int* size, int* ierr) { - *ierr = MPI_Buffer_detach(buffer, size); -} - -void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, MPI_Status* statuses, int* ierr) { - MPI_Request* reqs; - int i; - - reqs = xbt_new(MPI_Request, *incount); - for(i = 0; i < *incount; i++) { - reqs[i] = simgrid::smpi::Request::f2c(requests[i]); - indices[i]=0; - } - *ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses)); - for(i=0;i<*incount;i++){ - if(indices[i] && reqs[indices[i]]==MPI_REQUEST_NULL){ - simgrid::smpi::Request::free_f(requests[indices[i]]); - requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL; - } - } - xbt_free(reqs); -} - -void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) { - *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag); -} - -void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm, - int* ierr) { - *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){ - *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size); -} - -void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount, - MPI_Aint *position, int* ierr){ - *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position); -} - -void mpi_unpack_external_ ( char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf, - int* outcount, int* datatype, int* ierr){ - *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype)); -} - -void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) { - MPI_Datatype tmp; - *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - 
if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype, - int* ierr) { - MPI_Datatype tmp; - *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) { - MPI_Datatype tmp; - *ierr = MPI_Type_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype, - int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr) { - MPI_Datatype tmp; - int i=0; - MPI_Datatype* types = static_cast(xbt_malloc(*count*sizeof(MPI_Datatype))); - for(i=0; i< *count; i++){ - types[i] = simgrid::smpi::Datatype::f2c(old_types[i]); - } - *ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } - xbt_free(types); -} - -void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr){ - MPI_Datatype tmp; - int i=0; - MPI_Datatype* types = static_cast(xbt_malloc(*count*sizeof(MPI_Datatype))); - for(i=0; i< *count; i++){ - types[i] = simgrid::smpi::Datatype::f2c(old_types[i]); - } - *ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } - xbt_free(types); -} - -void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) { - *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) { - MPI_Request tmp; - *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, int* remote_leader, int* tag, - int* comm_out, int* ierr) { - MPI_Comm tmp; - *ierr = MPI_Intercomm_create(simgrid::smpi::Comm::f2c(*local_comm), *local_leader, simgrid::smpi::Comm::f2c(*peer_comm), *remote_leader, - *tag, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_out = tmp->add_f(); - } -} - -void mpi_intercomm_merge_ (int* comm, int* high, int* comm_out, int* ierr) { - MPI_Comm tmp; - *ierr = MPI_Intercomm_merge(simgrid::smpi::Comm::f2c(*comm), *high, &tmp); - if(*ierr == MPI_SUCCESS) { - *comm_out = tmp->add_f(); - } -} - -void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) { - *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { - MPI_Request tmp; - *ierr = MPI_Bsend_init(buf, *count, 
simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { - MPI_Request tmp; - *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) { - MPI_Group tmp; - *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *group = tmp->c2f(); - } -} - -void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) { - *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size); -} - -void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { - MPI_Request tmp; - *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) { - *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); -} - -void mpi_attr_delete_ (int* comm, int* keyval, int* ierr) { - *ierr = MPI_Attr_delete(simgrid::smpi::Comm::f2c(*comm), *keyval); -} - -void mpi_attr_put_ (int* comm, int* keyval, void* attr_value, int* ierr) { - *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value); -} - -void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { - MPI_Request tmp; - *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_keyval_create_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr) { - *ierr = MPI_Keyval_create(reinterpret_cast(copy_fn),reinterpret_cast(delete_fn), keyval, extra_state); -} - -void mpi_keyval_free_ (int* keyval, int* ierr) { - *ierr = MPI_Keyval_free(keyval); -} - -void mpi_test_cancelled_ (MPI_Status* status, int* flag, int* ierr) { - *ierr = MPI_Test_cancelled(status, flag); -} - -void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) { - *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_get_elements_ (MPI_Status* status, int* datatype, int* elements, int* ierr) { - *ierr = MPI_Get_elements(status, simgrid::smpi::Datatype::f2c(*datatype), elements); -} - -void mpi_dims_create_ (int* nnodes, int* ndims, int* dims, int* ierr) { - *ierr = MPI_Dims_create(*nnodes, *ndims, dims); -} - -void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) { - *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, status); -} - -void mpi_type_get_envelope_ ( int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner, - int* ierr){ - *ierr = MPI_Type_get_envelope( simgrid::smpi::Datatype::f2c(*datatype), num_integers, - num_addresses, num_datatypes, combiner); -} - -void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes, - int* 
array_of_integers, MPI_Aint* array_of_addresses, - int* array_of_datatypes, int* ierr){ - *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes, - array_of_integers, array_of_addresses, reinterpret_cast<MPI_Datatype*>(array_of_datatypes)); -} - -void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gsizes, int* array_of_distribs, - int* array_of_dargs, int* array_of_psizes, - int* order, int* oldtype, int*newtype, int* ierr) { - MPI_Datatype tmp; - *ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes, - array_of_distribs, array_of_dargs, array_of_psizes, - *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp) ; - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_resized_ (int* oldtype,MPI_Aint* lb, MPI_Aint* extent, int*newtype, int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype),*lb, *extent, &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_create_subarray_ (int* ndims,int *array_of_sizes, int *array_of_subsizes, int *array_of_starts, - int* order, int* oldtype, int*newtype, int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_create_subarray(*ndims,array_of_sizes, array_of_subsizes, array_of_starts, *order, - simgrid::smpi::Datatype::f2c(*oldtype), &tmp); - if(*ierr == MPI_SUCCESS) { - *newtype = tmp->add_f(); - } -} - -void mpi_type_match_size_ (int* typeclass,int* size,int* datatype, int* ierr){ - MPI_Datatype tmp; - *ierr = MPI_Type_match_size(*typeclass,*size,&tmp); - if(*ierr == MPI_SUCCESS) { - *datatype = tmp->c2f(); - } -} - -void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts, - int *rdispls, int* recvtypes, int* comm, int* ierr){ - *ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast<MPI_Datatype*>(sendtypes), recvbuf, recvcnts, rdispls, - reinterpret_cast<MPI_Datatype*>(recvtypes), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){ - *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); -} - -void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){ - char* tname = xbt_new(char, size+1); - strncpy(tname, name, size); - tname[size]='\0'; - *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname); - xbt_free(tname); -} - -void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_comm_set_info_ (int* comm, int* info, int* ierr){ - *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info)); -} - -void mpi_comm_get_info_ (int* comm, int* info, int* ierr){ - MPI_Info tmp; - *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr==MPI_SUCCESS){ - *info = tmp->c2f(); - } -} - -void mpi_comm_create_errhandler_ ( void *function, void *errhandler, int* ierr){ -
*ierr = MPI_Comm_create_errhandler( reinterpret_cast<MPI_Comm_errhandler_fn*>(function), static_cast<MPI_Errhandler*>(errhandler)); -} - -void mpi_add_error_class_ ( int *errorclass, int* ierr){ - *ierr = MPI_Add_error_class( errorclass); -} - -void mpi_add_error_code_ ( int* errorclass, int *errorcode, int* ierr){ - *ierr = MPI_Add_error_code(*errorclass, errorcode); -} - -void mpi_add_error_string_ ( int* errorcode, char *string, int* ierr){ - *ierr = MPI_Add_error_string(*errorcode, string); -} - -void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){ - *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode); -} - -void mpi_info_dup_ (int* info, int* newinfo, int* ierr){ - MPI_Info tmp; - *ierr = MPI_Info_dup(simgrid::smpi::Info::f2c(*info), &tmp); - if(*ierr==MPI_SUCCESS){ - *newinfo= tmp->add_f(); - } -} - -void mpi_info_get_valuelen_ ( int* info, char *key, int *valuelen, int *flag, int* ierr, unsigned int keylen){ - while(key[keylen-1]==' ') - keylen--; - while(*key==' '){//handle leading blanks - keylen--; - key++; - } - char* tkey = xbt_new(char, keylen+1); - strncpy(tkey, key, keylen); - tkey[keylen]='\0'; - *ierr = MPI_Info_get_valuelen( simgrid::smpi::Info::f2c(*info), tkey, valuelen, flag); - xbt_free(tkey); -} - -void mpi_info_delete_ (int* info, char *key, int* ierr, unsigned int keylen){ - while(key[keylen-1]==' ') - keylen--; - while(*key==' '){//handle leading blanks - keylen--; - key++; - } - char* tkey = xbt_new(char, keylen+1); - strncpy(tkey, key, keylen); - tkey[keylen]='\0'; - *ierr = MPI_Info_delete(simgrid::smpi::Info::f2c(*info), tkey); - xbt_free(tkey); -} - -void mpi_info_get_nkeys_ ( int* info, int *nkeys, int* ierr){ - *ierr = MPI_Info_get_nkeys( simgrid::smpi::Info::f2c(*info), nkeys); -} - -void mpi_info_get_nthkey_ ( int* info, int* n, char *key, int* ierr, unsigned int keylen){ - *ierr = MPI_Info_get_nthkey( simgrid::smpi::Info::f2c(*info), *n, key); - unsigned int i = 0; - for (i=strlen(key); i<keylen; i++) - key[i]=' '; -} - -void mpi_grequest_start_ ( void *query_fn, void *free_fn, void *cancel_fn, void *extra_state, int* request, int* ierr){ - MPI_Request tmp; - *ierr = MPI_Grequest_start( reinterpret_cast<MPI_Grequest_query_function*>(query_fn), reinterpret_cast<MPI_Grequest_free_function*>(free_fn), - reinterpret_cast<MPI_Grequest_cancel_function*>(cancel_fn), extra_state, &tmp); - if(*ierr == MPI_SUCCESS) { - *request = tmp->add_f(); - } -} - -void mpi_grequest_complete_ ( int* request, int* ierr){ - *ierr = MPI_Grequest_complete( simgrid::smpi::Request::f2c(*request)); -} - -void mpi_status_set_cancelled_ (MPI_Status* status,int* flag, int* ierr){ - *ierr = MPI_Status_set_cancelled(status,*flag); -} - -void mpi_status_set_elements_ ( MPI_Status* status, int* datatype, int* count, int* ierr){ - *ierr = MPI_Status_set_elements( status, simgrid::smpi::Datatype::f2c(*datatype), *count); -} - -void mpi_comm_connect_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_publish_name_ ( char *service_name, int* info, char *port_name, int* ierr){ - *ierr = MPI_Publish_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name); -} - -void mpi_unpublish_name_ ( char *service_name, int* info, char *port_name, int* ierr){ - *ierr = MPI_Unpublish_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name); -} - -void mpi_lookup_name_ ( char *service_name, int* info, char *port_name, int* ierr){ - *ierr = MPI_Lookup_name( service_name, *reinterpret_cast<MPI_Info*>(info), port_name); -} - -void mpi_comm_join_ ( int* fd, int* intercomm, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_join( *fd, &tmp); - if(*ierr == MPI_SUCCESS) { - *intercomm = tmp->add_f(); - } -} -
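Every Fortran wrapper in this file follows the same handle-translation pattern: the integer handle coming from Fortran is turned into a C++ object with f2c(), and any object a call creates is registered and handed back to Fortran as an integer via add_f() (or c2f() for handles that may already be registered). A minimal standalone sketch of such a registry follows; the names (Handle, f2c_table_) are illustrative assumptions, not SMPI's actual implementation:

    #include <cassert>
    #include <vector>

    // Toy object type mimicking the f2c()/c2f()/add_f() pattern used above.
    class Handle {
      static std::vector<Handle*> f2c_table_; // index == Fortran integer handle
      int f_ = -1;
    public:
      int add_f() { // register this object and return its Fortran handle
        f2c_table_.push_back(this);
        f_ = static_cast<int>(f2c_table_.size()) - 1;
        return f_;
      }
      int c2f() { return f_ >= 0 ? f_ : add_f(); } // register lazily if needed
      static Handle* f2c(int f) { return f2c_table_.at(f); } // int -> object
    };
    std::vector<Handle*> Handle::f2c_table_;

    int main() {
      Handle h;
      int f = h.c2f();              // what a wrapper stores into *request etc.
      assert(Handle::f2c(f) == &h); // f2c() recovers the object from the int
    }

The hidden string-length arguments (e.g. keylen above) are the other Fortran interop convention at work here: Fortran CHARACTER arguments are not NUL-terminated, so each wrapper receives their lengths as extra trailing parameters and trims or pads blanks by hand.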
-void mpi_open_port_ ( int* info, char *port_name, int* ierr){ - *ierr = MPI_Open_port( *reinterpret_cast<MPI_Info*>(info),port_name); -} - -void mpi_close_port_ ( char *port_name, int* ierr){ - *ierr = MPI_Close_port( port_name); -} - -void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp); - if(*ierr == MPI_SUCCESS) { - *newcomm = tmp->add_f(); - } -} - -void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm, - int* array_of_errcodes, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, - array_of_errcodes); - if(*ierr == MPI_SUCCESS) { - *intercomm = tmp->add_f(); - } -} - -void mpi_comm_spawn_multiple_ ( int* count, char *array_of_commands, char** array_of_argv, int* array_of_maxprocs, - int* array_of_info, int* root, - int* comm, int* intercomm, int* array_of_errcodes, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_spawn_multiple(* count, &array_of_commands, &array_of_argv, array_of_maxprocs, - reinterpret_cast<MPI_Info*>(array_of_info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes); - if(*ierr == MPI_SUCCESS) { - *intercomm = tmp->add_f(); - } -} - -void mpi_comm_get_parent_ ( int* parent, int* ierr){ - MPI_Comm tmp; - *ierr = MPI_Comm_get_parent( &tmp); - if(*ierr == MPI_SUCCESS) { - *parent = tmp->c2f(); - } -} - -void mpi_file_close_ ( int* file, int* ierr){ - *ierr= MPI_File_close(reinterpret_cast<MPI_File*>(*file)); -} - -void mpi_file_delete_ ( char* filename, int* info, int* ierr){ - *ierr= MPI_File_delete(filename, simgrid::smpi::Info::f2c(*info)); -} - -void mpi_file_open_ ( int* comm, char* filename, int* amode, int* info, int* fh, int* ierr){ - *ierr= MPI_File_open(simgrid::smpi::Comm::f2c(*comm), filename, *amode, simgrid::smpi::Info::f2c(*info), reinterpret_cast<MPI_File*>(*fh)); -} - -void mpi_file_set_view_ ( int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr){ - *ierr= MPI_File_set_view(reinterpret_cast<MPI_File>(*fh) , reinterpret_cast<MPI_Offset>(*offset), simgrid::smpi::Datatype::f2c(*etype), simgrid::smpi::Datatype::f2c(*filetype), datarep, simgrid::smpi::Info::f2c(*info)); -} - -void mpi_file_read_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){ - *ierr= MPI_File_read(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status); -} - -void mpi_file_write_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){ - *ierr= MPI_File_write(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status); -} - -} // extern "C" diff --git a/src/smpi/smpi_pmpi.cpp b/src/smpi/smpi_pmpi.cpp deleted file mode 100644 index c48bdcf59e..0000000000 --- a/src/smpi/smpi_pmpi.cpp +++ /dev/null @@ -1,3532 +0,0 @@ -/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */ - -/* This program is free software; you can redistribute it and/or modify it - * under the terms of the license (GNU LGPL) which comes with this package.
*/ - -#include - -#include "simgrid/s4u/Engine.hpp" -#include "simgrid/s4u/Host.hpp" -#include "src/smpi/private.h" -#include "src/smpi/smpi_comm.hpp" -#include "src/smpi/smpi_coll.hpp" -#include "src/smpi/smpi_datatype_derived.hpp" -#include "src/smpi/smpi_op.hpp" -#include "src/smpi/smpi_process.hpp" -#include "src/smpi/smpi_request.hpp" -#include "src/smpi/smpi_status.hpp" -#include "src/smpi/smpi_win.hpp" - -XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)"); - -//this function need to be here because of the calls to smpi_bench -void TRACE_smpi_set_category(const char *category) -{ - //need to end bench otherwise categories for execution tasks are wrong - smpi_bench_end(); - TRACE_internal_smpi_set_category (category); - //begin bench after changing process's category - smpi_bench_begin(); -} - -/* PMPI User level calls */ -extern "C" { // Obviously, the C MPI interface should use the C linkage - -int PMPI_Init(int *argc, char ***argv) -{ - xbt_assert(simgrid::s4u::Engine::isInitialized(), - "Your MPI program was not properly initialized. The easiest is to use smpirun to start it."); - // PMPI_Init is called only once per SMPI process - int already_init; - MPI_Initialized(&already_init); - if(already_init == 0){ - simgrid::smpi::Process::init(argc, argv); - smpi_process()->mark_as_initialized(); - int rank = smpi_process()->index(); - TRACE_smpi_init(rank); - TRACE_smpi_computing_init(rank); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_INIT; - TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - TRACE_smpi_collective_out(rank, -1, __FUNCTION__); - smpi_bench_begin(); - } - - smpi_mpi_init(); - - return MPI_SUCCESS; -} - -int PMPI_Finalize() -{ - smpi_bench_end(); - int rank = smpi_process()->index(); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_FINALIZE; - TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - - smpi_process()->finalize(); - - TRACE_smpi_collective_out(rank, -1, __FUNCTION__); - TRACE_smpi_finalize(smpi_process()->index()); - return MPI_SUCCESS; -} - -int PMPI_Finalized(int* flag) -{ - *flag=smpi_process()!=nullptr ? smpi_process()->finalized() : 0; - return MPI_SUCCESS; -} - -int PMPI_Get_version (int *version,int *subversion){ - *version = MPI_VERSION; - *subversion= MPI_SUBVERSION; - return MPI_SUCCESS; -} - -int PMPI_Get_library_version (char *version,int *len){ - smpi_bench_end(); - snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The Simgrid Team 2007-2017", - SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR); - *len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? 
MPI_MAX_LIBRARY_VERSION_STRING : strlen(version); - smpi_bench_begin(); - return MPI_SUCCESS; -} - -int PMPI_Init_thread(int *argc, char ***argv, int required, int *provided) -{ - if (provided != nullptr) { - *provided = MPI_THREAD_SINGLE; - } - return MPI_Init(argc, argv); -} - -int PMPI_Query_thread(int *provided) -{ - if (provided == nullptr) { - return MPI_ERR_ARG; - } else { - *provided = MPI_THREAD_SINGLE; - return MPI_SUCCESS; - } -} - -int PMPI_Is_thread_main(int *flag) -{ - if (flag == nullptr) { - return MPI_ERR_ARG; - } else { - *flag = smpi_process()->index() == 0; - return MPI_SUCCESS; - } -} - -int PMPI_Abort(MPI_Comm comm, int errorcode) -{ - smpi_bench_end(); - // FIXME: should kill all processes in comm instead - simcall_process_kill(SIMIX_process_self()); - return MPI_SUCCESS; -} - -double PMPI_Wtime() -{ - return smpi_mpi_wtime(); -} - -extern double sg_maxmin_precision; -double PMPI_Wtick() -{ - return sg_maxmin_precision; -} - -int PMPI_Address(void *location, MPI_Aint * address) -{ - if (address==nullptr) { - return MPI_ERR_ARG; - } else { - *address = reinterpret_cast<MPI_Aint>(location); - return MPI_SUCCESS; - } -} - -int PMPI_Get_address(void *location, MPI_Aint * address) -{ - return PMPI_Address(location, address); -} - -int PMPI_Type_free(MPI_Datatype * datatype) -{ - /* Free a predefined datatype is an error according to the standard, and should be checked for */ - if (*datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_ARG; - } else { - simgrid::smpi::Datatype::unref(*datatype); - return MPI_SUCCESS; - } -} - -int PMPI_Type_size(MPI_Datatype datatype, int *size) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (size == nullptr) { - return MPI_ERR_ARG; - } else { - *size = static_cast<int>(datatype->size()); - return MPI_SUCCESS; - } -} - -int PMPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (size == nullptr) { - return MPI_ERR_ARG; - } else { - *size = static_cast<MPI_Count>(datatype->size()); - return MPI_SUCCESS; - } -} - -int PMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (lb == nullptr || extent == nullptr) { - return MPI_ERR_ARG; - } else { - return datatype->extent(lb, extent); - } -} - -int PMPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent) -{ - return PMPI_Type_get_extent(datatype, lb, extent); -} - -int PMPI_Type_extent(MPI_Datatype datatype, MPI_Aint * extent) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (extent == nullptr) { - return MPI_ERR_ARG; - } else { - *extent = datatype->get_extent(); - return MPI_SUCCESS; - } -} - -int PMPI_Type_lb(MPI_Datatype datatype, MPI_Aint * disp) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (disp == nullptr) { - return MPI_ERR_ARG; - } else { - *disp = datatype->lb(); - return MPI_SUCCESS; - } -} - -int PMPI_Type_ub(MPI_Datatype datatype, MPI_Aint * disp) -{ - if (datatype == MPI_DATATYPE_NULL) { - return MPI_ERR_TYPE; - } else if (disp == nullptr) { - return MPI_ERR_ARG; - } else { - *disp = datatype->ub(); - return MPI_SUCCESS; - } -} - -int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){ - int retval = MPI_SUCCESS; - if (datatype == MPI_DATATYPE_NULL) { - retval=MPI_ERR_TYPE; - } else { - *newtype = new simgrid::smpi::Datatype(datatype, &retval); - //error when duplicating, free the new datatype -
if(retval!=MPI_SUCCESS){ - simgrid::smpi::Datatype::unref(*newtype); - *newtype = MPI_DATATYPE_NULL; - } - } - return retval; -} - -int PMPI_Op_create(MPI_User_function * function, int commute, MPI_Op * op) -{ - if (function == nullptr || op == nullptr) { - return MPI_ERR_ARG; - } else { - *op = new simgrid::smpi::Op(function, (commute!=0)); - return MPI_SUCCESS; - } -} - -int PMPI_Op_free(MPI_Op * op) -{ - if (op == nullptr) { - return MPI_ERR_ARG; - } else if (*op == MPI_OP_NULL) { - return MPI_ERR_OP; - } else { - delete (*op); - *op = MPI_OP_NULL; - return MPI_SUCCESS; - } -} - -int PMPI_Op_commutative(MPI_Op op, int* commute){ - if (op == MPI_OP_NULL) { - return MPI_ERR_OP; - } else if (commute==nullptr){ - return MPI_ERR_ARG; - } else { - *commute = op->is_commutative(); - return MPI_SUCCESS; - } -} - -int PMPI_Group_free(MPI_Group * group) -{ - if (group == nullptr) { - return MPI_ERR_ARG; - } else { - if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY) - simgrid::smpi::Group::unref(*group); - *group = MPI_GROUP_NULL; - return MPI_SUCCESS; - } -} - -int PMPI_Group_size(MPI_Group group, int *size) -{ - if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (size == nullptr) { - return MPI_ERR_ARG; - } else { - *size = group->size(); - return MPI_SUCCESS; - } -} - -int PMPI_Group_rank(MPI_Group group, int *rank) -{ - if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (rank == nullptr) { - return MPI_ERR_ARG; - } else { - *rank = group->rank(smpi_process()->index()); - return MPI_SUCCESS; - } -} - -int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, MPI_Group group2, int *ranks2) -{ - if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else { - for (int i = 0; i < n; i++) { - if(ranks1[i]==MPI_PROC_NULL){ - ranks2[i]=MPI_PROC_NULL; - }else{ - int index = group1->index(ranks1[i]); - ranks2[i] = group2->rank(index); - } - } - return MPI_SUCCESS; - } -} - -int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result) -{ - if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (result == nullptr) { - return MPI_ERR_ARG; - } else { - *result = group1->compare(group2); - return MPI_SUCCESS; - } -} - -int PMPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) -{ - - if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - return group1->group_union(group2, newgroup); - } -} - -int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) -{ - - if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - return group1->intersection(group2,newgroup); - } -} - -int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) -{ - if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - return group1->difference(group2,newgroup); - } -} - -int PMPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) -{ - if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - return group->incl(n, ranks, newgroup); - } -} - -int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) -{ - if (group == MPI_GROUP_NULL) { 
- return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - if (n == 0) { - *newgroup = group; - if (group != MPI_COMM_WORLD->group() - && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY) - group->ref(); - return MPI_SUCCESS; - } else if (n == group->size()) { - *newgroup = MPI_GROUP_EMPTY; - return MPI_SUCCESS; - } else { - return group->excl(n,ranks,newgroup); - } - } -} - -int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup) -{ - if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - if (n == 0) { - *newgroup = MPI_GROUP_EMPTY; - return MPI_SUCCESS; - } else { - return group->range_incl(n,ranges,newgroup); - } - } -} - -int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group * newgroup) -{ - if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newgroup == nullptr) { - return MPI_ERR_ARG; - } else { - if (n == 0) { - *newgroup = group; - if (group != MPI_COMM_WORLD->group() && group != MPI_COMM_SELF->group() && - group != MPI_GROUP_EMPTY) - group->ref(); - return MPI_SUCCESS; - } else { - return group->range_excl(n,ranges,newgroup); - } - } -} - -int PMPI_Comm_rank(MPI_Comm comm, int *rank) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (rank == nullptr) { - return MPI_ERR_ARG; - } else { - *rank = comm->rank(); - return MPI_SUCCESS; - } -} - -int PMPI_Comm_size(MPI_Comm comm, int *size) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (size == nullptr) { - return MPI_ERR_ARG; - } else { - *size = comm->size(); - return MPI_SUCCESS; - } -} - -int PMPI_Comm_get_name (MPI_Comm comm, char* name, int* len) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (name == nullptr || len == nullptr) { - return MPI_ERR_ARG; - } else { - comm->get_name(name, len); - return MPI_SUCCESS; - } -} - -int PMPI_Comm_group(MPI_Comm comm, MPI_Group * group) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (group == nullptr) { - return MPI_ERR_ARG; - } else { - *group = comm->group(); - if (*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY) - (*group)->ref(); - return MPI_SUCCESS; - } -} - -int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) -{ - if (comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (result == nullptr) { - return MPI_ERR_ARG; - } else { - if (comm1 == comm2) { /* Same communicators means same groups */ - *result = MPI_IDENT; - } else { - *result = comm1->group()->compare(comm2->group()); - if (*result == MPI_IDENT) { - *result = MPI_CONGRUENT; - } - } - return MPI_SUCCESS; - } -} - -int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm * newcomm) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (newcomm == nullptr) { - return MPI_ERR_ARG; - } else { - return comm->dup(newcomm); - } -} - -int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm) -{ - if (comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else if (group == MPI_GROUP_NULL) { - return MPI_ERR_GROUP; - } else if (newcomm == nullptr) { - return MPI_ERR_ARG; - } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){ - *newcomm= MPI_COMM_NULL; - return MPI_SUCCESS; - }else{ - group->ref(); - *newcomm = new simgrid::smpi::Comm(group, nullptr); - return MPI_SUCCESS; - } -} - -int PMPI_Comm_free(MPI_Comm * comm) -{ - if (comm == nullptr) { - return 
MPI_ERR_ARG; - } else if (*comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else { - simgrid::smpi::Comm::destroy(*comm); - *comm = MPI_COMM_NULL; - return MPI_SUCCESS; - } -} - -int PMPI_Comm_disconnect(MPI_Comm * comm) -{ - /* TODO: wait until all communication in comm are done */ - if (comm == nullptr) { - return MPI_ERR_ARG; - } else if (*comm == MPI_COMM_NULL) { - return MPI_ERR_COMM; - } else { - simgrid::smpi::Comm::destroy(*comm); - *comm = MPI_COMM_NULL; - return MPI_SUCCESS; - } -} - -int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out) -{ - int retval = 0; - smpi_bench_end(); - - if (comm_out == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else { - *comm_out = comm->split(color, key); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - - return retval; -} - -int PMPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int, MPI_Comm* comm_out) -{ - int retval = 0; - smpi_bench_end(); - - if (comm_out == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else { - retval = MPI_Comm_create(comm, group, comm_out); - } - smpi_bench_begin(); - - return retval; -} - -int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if (dst == MPI_PROC_NULL) { - retval = MPI_SUCCESS; - } else { - *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request != nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - -int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if (src == MPI_PROC_NULL) { - retval = MPI_SUCCESS; - } else { - *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request != nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - -int PMPI_Ssend_init(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if (dst == MPI_PROC_NULL) { - retval = MPI_SUCCESS; - } else { - *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request != nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - -int PMPI_Start(MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr || *request == MPI_REQUEST_NULL) { - retval = MPI_ERR_REQUEST; - } else { - (*request)->start(); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Startall(int count, MPI_Request * requests) -{ - int retval; - 
smpi_bench_end(); - if (requests == nullptr) { - retval = MPI_ERR_ARG; - } else { - retval = MPI_SUCCESS; - for (int i = 0; i < count; i++) { - if(requests[i] == MPI_REQUEST_NULL) { - retval = MPI_ERR_REQUEST; - } - } - if(retval != MPI_ERR_REQUEST) { - simgrid::smpi::Request::startall(count, requests); - } - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Request_free(MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - if (*request == MPI_REQUEST_NULL) { - retval = MPI_ERR_ARG; - } else { - simgrid::smpi::Request::unref(request); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (src == MPI_PROC_NULL) { - *request = MPI_REQUEST_NULL; - retval = MPI_SUCCESS; - } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ - retval = MPI_ERR_RANK; - } else if ((count < 0) || (buf==nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag<0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - int src_traced = comm->group()->index(src); - - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_IRECV; - extra->src = src_traced; - extra->dst = rank; - int known=0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if(known==0) - dt_size_send = datatype->size(); - extra->send_size = count*dt_size_send; - TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); - - *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); - } - - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request != nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - - -int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (dst == MPI_PROC_NULL) { - *request = MPI_REQUEST_NULL; - retval = MPI_SUCCESS; - } else if (dst >= comm->group()->size() || dst <0){ - retval = MPI_ERR_RANK; - } else if ((count < 0) || (buf==nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag<0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; - int dst_traced = comm->group()->index(dst); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_ISEND; - extra->src = rank; - extra->dst = dst_traced; - int known=0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if(known==0) - dt_size_send = datatype->size(); - extra->send_size = count*dt_size_send; - TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); - TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); - - *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); - } - - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request!=nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - -int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) -{ - int retval = 0; - - smpi_bench_end(); - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (dst == MPI_PROC_NULL) { - *request = MPI_REQUEST_NULL; - retval = MPI_SUCCESS; - } else if (dst >= comm->group()->size() || dst <0){ - retval = MPI_ERR_RANK; - } else if ((count < 0)|| (buf==nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag<0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - int dst_traced = comm->group()->index(dst); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_ISSEND; - extra->src = rank; - extra->dst = dst_traced; - int known=0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if(known==0) - dt_size_send = datatype->size(); - extra->send_size = count*dt_size_send; - TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); - TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); - - *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); - } - - smpi_bench_begin(); - if (retval != MPI_SUCCESS && request!=nullptr) - *request = MPI_REQUEST_NULL; - return retval; -} - -int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status) -{ - int retval = 0; - - smpi_bench_end(); - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (src == MPI_PROC_NULL) { - simgrid::smpi::Status::empty(status); - status->MPI_SOURCE = MPI_PROC_NULL; - retval = MPI_SUCCESS; - } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ - retval = MPI_ERR_RANK; - } else if ((count < 0) || (buf==nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag<0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; - int src_traced = comm->group()->index(src); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_RECV; - extra->src = src_traced; - extra->dst = rank; - int known = 0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = datatype->size(); - extra->send_size = count * dt_size_send; - TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); - - simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status); - retval = MPI_SUCCESS; - - // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) - if (status != MPI_STATUS_IGNORE) { - src_traced = comm->group()->index(status->MPI_SOURCE); - if (not TRACE_smpi_view_internals()) { - TRACE_smpi_recv(rank, src_traced, rank, tag); - } - } - TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (dst == MPI_PROC_NULL) { - retval = MPI_SUCCESS; - } else if (dst >= comm->group()->size() || dst <0){ - retval = MPI_ERR_RANK; - } else if ((count < 0) || (buf == nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag < 0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - int dst_traced = comm->group()->index(dst); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_SEND; - extra->src = rank; - extra->dst = dst_traced; - int known = 0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if (known == 0) { - dt_size_send = datatype->size(); - } - extra->send_size = count*dt_size_send; - TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); - if (not TRACE_smpi_view_internals()) { - TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); - } - - simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (dst == MPI_PROC_NULL) { - retval = MPI_SUCCESS; - } else if (dst >= comm->group()->size() || dst <0){ - retval = MPI_ERR_RANK; - } else if ((count < 0) || (buf==nullptr && count > 0)) { - retval = MPI_ERR_COUNT; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if(tag<0 && tag != MPI_ANY_TAG){ - retval = MPI_ERR_TAG; - } else { - int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; - int dst_traced = comm->group()->index(dst); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_SSEND; - extra->src = rank; - extra->dst = dst_traced; - int known = 0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if(known == 0) { - dt_size_send = datatype->size(); - } - extra->send_size = count*dt_size_send; - TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); - TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); - - simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag, void *recvbuf, - int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm, MPI_Status * status) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (not sendtype->is_valid() || not recvtype->is_valid()) { - retval = MPI_ERR_TYPE; - } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) { - simgrid::smpi::Status::empty(status); - status->MPI_SOURCE = MPI_PROC_NULL; - retval = MPI_SUCCESS; - }else if (dst >= comm->group()->size() || dst <0 || - (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){ - retval = MPI_ERR_RANK; - } else if ((sendcount < 0 || recvcount<0) || - (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) { - retval = MPI_ERR_COUNT; - } else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){ - retval = MPI_ERR_TAG; - } else { - - int rank = comm != MPI_COMM_NULL ? 
smpi_process()->index() : -1; - int dst_traced = comm->group()->index(dst); - int src_traced = comm->group()->index(src); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_SENDRECV; - extra->src = src_traced; - extra->dst = dst_traced; - int known=0; - extra->datatype1 = encode_datatype(sendtype, &known); - int dt_size_send = 1; - if(known==0) - dt_size_send = sendtype->size(); - extra->send_size = sendcount*dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if(known==0) - dt_size_recv = recvtype->size(); - extra->recv_size = recvcount*dt_size_recv; - - TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); - TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size()); - - simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm, - status); - retval = MPI_SUCCESS; - - TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__); - TRACE_smpi_recv(rank, src_traced, rank, recvtag); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, int sendtag, int src, int recvtag, - MPI_Comm comm, MPI_Status* status) -{ - int retval = 0; - if (not datatype->is_valid()) { - return MPI_ERR_TYPE; - } else if (count < 0) { - return MPI_ERR_COUNT; - } else { - int size = datatype->get_extent() * count; - void* recvbuf = xbt_new0(char, size); - retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status); - if(retval==MPI_SUCCESS){ - simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype); - } - xbt_free(recvbuf); - - } - return retval; -} - -int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status) -{ - int retval = 0; - smpi_bench_end(); - if (request == nullptr || flag == nullptr) { - retval = MPI_ERR_ARG; - } else if (*request == MPI_REQUEST_NULL) { - *flag= true; - simgrid::smpi::Status::empty(status); - retval = MPI_SUCCESS; - } else { - int rank = ((*request)->comm() != MPI_COMM_NULL) ? 
smpi_process()->index() : -1; - - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_TEST; - TRACE_smpi_testing_in(rank, extra); - - *flag = simgrid::smpi::Request::test(request,status); - - TRACE_smpi_testing_out(rank); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag, MPI_Status * status) -{ - int retval = 0; - - smpi_bench_end(); - if (index == nullptr || flag == nullptr) { - retval = MPI_ERR_ARG; - } else { - *flag = simgrid::smpi::Request::testany(count, requests, index, status); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* statuses) -{ - int retval = 0; - - smpi_bench_end(); - if (flag == nullptr) { - retval = MPI_ERR_ARG; - } else { - *flag = simgrid::smpi::Request::testall(count, requests, statuses); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status* status) { - int retval = 0; - smpi_bench_end(); - - if (status == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (source == MPI_PROC_NULL) { - simgrid::smpi::Status::empty(status); - status->MPI_SOURCE = MPI_PROC_NULL; - retval = MPI_SUCCESS; - } else { - simgrid::smpi::Request::probe(source, tag, comm, status); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) { - int retval = 0; - smpi_bench_end(); - - if (flag == nullptr) { - retval = MPI_ERR_ARG; - } else if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (source == MPI_PROC_NULL) { - *flag=true; - simgrid::smpi::Status::empty(status); - status->MPI_SOURCE = MPI_PROC_NULL; - retval = MPI_SUCCESS; - } else { - simgrid::smpi::Request::iprobe(source, tag, comm, flag, status); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Wait(MPI_Request * request, MPI_Status * status) -{ - int retval = 0; - - smpi_bench_end(); - - simgrid::smpi::Status::empty(status); - - if (request == nullptr) { - retval = MPI_ERR_ARG; - } else if (*request == MPI_REQUEST_NULL) { - retval = MPI_SUCCESS; - } else { - - int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1; - - int src_traced = (*request)->src(); - int dst_traced = (*request)->dst(); - int tag_traced= (*request)->tag(); - MPI_Comm comm = (*request)->comm(); - int is_wait_for_receive = ((*request)->flags() & RECV); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_WAIT; - TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); - - simgrid::smpi::Request::wait(request, status); - retval = MPI_SUCCESS; - - //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) - TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__); - if (is_wait_for_receive) { - if(src_traced==MPI_ANY_SOURCE) - src_traced = (status!=MPI_STATUS_IGNORE) ? 
- comm->group()->rank(status->MPI_SOURCE) : - src_traced; - TRACE_smpi_recv(rank, src_traced, dst_traced, tag_traced); - } - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * status) -{ - if (index == nullptr) - return MPI_ERR_ARG; - - smpi_bench_end(); - //save requests information for tracing - typedef struct { - int src; - int dst; - int recv; - int tag; - MPI_Comm comm; - } savedvalstype; - savedvalstype* savedvals=nullptr; - if(count>0){ - savedvals = xbt_new0(savedvalstype, count); - } - for (int i = 0; i < count; i++) { - MPI_Request req = requests[i]; //already received requests are no longer valid - if (req) { - savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), req->comm()}; - } - } - int rank_traced = smpi_process()->index(); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_WAITANY; - extra->send_size=count; - TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); - - *index = simgrid::smpi::Request::waitany(count, requests, status); - - if(*index!=MPI_UNDEFINED){ - int src_traced = savedvals[*index].src; - //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) - int dst_traced = savedvals[*index].dst; - int is_wait_for_receive = savedvals[*index].recv; - if (is_wait_for_receive) { - if(savedvals[*index].src==MPI_ANY_SOURCE) - src_traced = (status != MPI_STATUSES_IGNORE) - ? savedvals[*index].comm->group()->rank(status->MPI_SOURCE) - : savedvals[*index].src; - TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[*index].tag); - } - TRACE_smpi_ptp_out(rank_traced, src_traced, dst_traced, __FUNCTION__); - } - xbt_free(savedvals); - - smpi_bench_begin(); - return MPI_SUCCESS; -} - -int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) -{ - smpi_bench_end(); - //save information from requests - typedef struct { - int src; - int dst; - int recv; - int tag; - int valid; - MPI_Comm comm; - } savedvalstype; - savedvalstype* savedvals=xbt_new0(savedvalstype, count); - - for (int i = 0; i < count; i++) { - MPI_Request req = requests[i]; - if(req!=MPI_REQUEST_NULL){ - savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), 1, req->comm()}; - }else{ - savedvals[i].valid=0; - } - } - int rank_traced = smpi_process()->index(); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); - extra->type = TRACING_WAITALL; - extra->send_size=count; - TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); - - int retval = simgrid::smpi::Request::waitall(count, requests, status); - - for (int i = 0; i < count; i++) { - if(savedvals[i].valid){ - //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) - int src_traced = savedvals[i].src; - int dst_traced = savedvals[i].dst; - int is_wait_for_receive = savedvals[i].recv; - if (is_wait_for_receive) { - if(src_traced==MPI_ANY_SOURCE) - src_traced = (status!=MPI_STATUSES_IGNORE) ? 
- savedvals[i].comm->group()->rank(status[i].MPI_SOURCE) : savedvals[i].src; - TRACE_smpi_recv(rank_traced, src_traced, dst_traced,savedvals[i].tag); - } - } - } - TRACE_smpi_ptp_out(rank_traced, -1, -1, __FUNCTION__); - xbt_free(savedvals); - - smpi_bench_begin(); - return retval; -} - -int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[]) -{ - int retval = 0; - - smpi_bench_end(); - if (outcount == nullptr) { - retval = MPI_ERR_ARG; - } else { - *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indices, MPI_Status status[]) -{ - int retval = 0; - - smpi_bench_end(); - if (outcount == nullptr) { - retval = MPI_ERR_ARG; - } else { - *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status); - retval = MPI_SUCCESS; - } - smpi_bench_begin(); - return retval; -} - - -int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (not datatype->is_valid()) { - retval = MPI_ERR_ARG; - } else { - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - int root_traced = comm->group()->index(root); - - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_BCAST; - extra->root = root_traced; - int known = 0; - extra->datatype1 = encode_datatype(datatype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = datatype->size(); - extra->send_size = count * dt_size_send; - TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - if (comm->size() > 1) - simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm); - retval = MPI_SUCCESS; - - TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Barrier(MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else { - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_BARRIER; - TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - - simgrid::smpi::Colls::barrier(comm); - - //Barrier can be used to synchronize RMA calls. Finish all requests from comm before. - comm->finish_rma_calls(); - - retval = MPI_SUCCESS; - - TRACE_smpi_collective_out(rank, -1, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype, - int root, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || - ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ - retval = MPI_ERR_TYPE; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){ - retval = MPI_ERR_COUNT; - } else { - - char* sendtmpbuf = static_cast<char*>(sendbuf); - int sendtmpcount = sendcount; - MPI_Datatype sendtmptype = sendtype; - if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) { - sendtmpcount=0; - sendtmptype=recvtype; - } - int rank = comm != MPI_COMM_NULL ?
smpi_process()->index() : -1; - int root_traced = comm->group()->index(root); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_GATHER; - extra->root = root_traced; - int known = 0; - extra->datatype1 = encode_datatype(sendtmptype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = sendtmptype->size(); - extra->send_size = sendtmpcount * dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if ((comm->rank() == root) && known == 0) - dt_size_recv = recvtype->size(); - extra->recv_size = recvcount * dt_size_recv; - - TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - - simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm); - - retval = MPI_SUCCESS; - TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs, - MPI_Datatype recvtype, int root, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || - ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ - retval = MPI_ERR_TYPE; - } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){ - retval = MPI_ERR_COUNT; - } else if (recvcounts == nullptr || displs == nullptr) { - retval = MPI_ERR_ARG; - } else { - char* sendtmpbuf = static_cast<char*>(sendbuf); - int sendtmpcount = sendcount; - MPI_Datatype sendtmptype = sendtype; - if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) { - sendtmpcount=0; - sendtmptype=recvtype; - } - - int rank = comm != MPI_COMM_NULL ?
smpi_process()->index() : -1; - int root_traced = comm->group()->index(root); - int size = comm->size(); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_GATHERV; - extra->num_processes = size; - extra->root = root_traced; - int known = 0; - extra->datatype1 = encode_datatype(sendtmptype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = sendtype->size(); - extra->send_size = sendtmpcount * dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if (known == 0) - dt_size_recv = recvtype->size(); - if (comm->rank() == root) { - extra->recvcounts = xbt_new(int, size); - for (int i = 0; i < size; i++) // copy data to avoid bad free - extra->recvcounts[i] = recvcounts[i] * dt_size_recv; - } - TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - - retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm); - TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || - (recvtype == MPI_DATATYPE_NULL)){ - retval = MPI_ERR_TYPE; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || - (recvcount <0)){ - retval = MPI_ERR_COUNT; - } else { - if(sendbuf == MPI_IN_PLACE) { - sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*recvcount*comm->rank(); - sendcount=recvcount; - sendtype=recvtype; - } - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_ALLGATHER; - int known = 0; - extra->datatype1 = encode_datatype(sendtype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = sendtype->size(); - extra->send_size = sendcount * dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if (known == 0) - dt_size_recv = recvtype->size(); - extra->recv_size = recvcount * dt_size_recv; - - TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - - simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); - retval = MPI_SUCCESS; - TRACE_smpi_collective_out(rank, -1, __FUNCTION__); - } - smpi_bench_begin(); - return retval; -} - -int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) { - retval = MPI_ERR_TYPE; - } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){ - retval = MPI_ERR_COUNT; - } else if (recvcounts == nullptr || displs == nullptr) { - retval = MPI_ERR_ARG; - } else { - - if(sendbuf == MPI_IN_PLACE) { - sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*displs[comm->rank()]; - sendcount=recvcounts[comm->rank()]; - sendtype=recvtype; - } - int rank = comm != MPI_COMM_NULL ?
smpi_process()->index() : -1; - int i = 0; - int size = comm->size(); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_ALLGATHERV; - extra->num_processes = size; - int known = 0; - extra->datatype1 = encode_datatype(sendtype, &known); - int dt_size_send = 1; - if (known == 0) - dt_size_send = sendtype->size(); - extra->send_size = sendcount * dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if (known == 0) - dt_size_recv = recvtype->size(); - extra->recvcounts = xbt_new(int, size); - for (i = 0; i < size; i++) // copy data to avoid bad free - extra->recvcounts[i] = recvcounts[i] * dt_size_recv; - - TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - - simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); - retval = MPI_SUCCESS; - TRACE_smpi_collective_out(rank, -1, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (((comm->rank() == root) && (not sendtype->is_valid())) || - ((recvbuf != MPI_IN_PLACE) && (not recvtype->is_valid()))) { - retval = MPI_ERR_TYPE; - } else if ((sendbuf == recvbuf) || - ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){ - retval = MPI_ERR_BUFFER; - }else { - - if (recvbuf == MPI_IN_PLACE) { - recvtype = sendtype; - recvcount = sendcount; - } - int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; - int root_traced = comm->group()->index(root); - instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); - extra->type = TRACING_SCATTER; - extra->root = root_traced; - int known = 0; - extra->datatype1 = encode_datatype(sendtype, &known); - int dt_size_send = 1; - if ((comm->rank() == root) && known == 0) - dt_size_send = sendtype->size(); - extra->send_size = sendcount * dt_size_send; - extra->datatype2 = encode_datatype(recvtype, &known); - int dt_size_recv = 1; - if (known == 0) - dt_size_recv = recvtype->size(); - extra->recv_size = recvcount * dt_size_recv; - TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - - simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); - retval = MPI_SUCCESS; - TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); - } - - smpi_bench_begin(); - return retval; -} - -int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, - MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) -{ - int retval = 0; - - smpi_bench_end(); - - if (comm == MPI_COMM_NULL) { - retval = MPI_ERR_COMM; - } else if (sendcounts == nullptr || displs == nullptr) { - retval = MPI_ERR_ARG; - } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) || - ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) { - retval = MPI_ERR_TYPE; - } else { - if (recvbuf == MPI_IN_PLACE) { - recvtype = sendtype; - recvcount = sendcounts[comm->rank()]; - } - int rank = comm != MPI_COMM_NULL ? 
-int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
-                  MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
-{
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (sendcounts == nullptr || displs == nullptr) {
-    retval = MPI_ERR_ARG;
-  } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) ||
-             ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) {
-    retval = MPI_ERR_TYPE;
-  } else {
-    if (recvbuf == MPI_IN_PLACE) {
-      recvtype = sendtype;
-      recvcount = sendcounts[comm->rank()];
-    }
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    int root_traced = comm->group()->index(root);
-    int size = comm->size();
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_SCATTERV;
-    extra->num_processes = size;
-    extra->root = root_traced;
-    int known = 0;
-    extra->datatype1 = encode_datatype(sendtype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = sendtype->size();
-    if (comm->rank() == root) {
-      extra->sendcounts = xbt_new(int, size);
-      for (int i = 0; i < size; i++) // copy data to avoid bad free
-        extra->sendcounts[i] = sendcounts[i] * dt_size_send;
-    }
-    extra->datatype2 = encode_datatype(recvtype, &known);
-    int dt_size_recv = 1;
-    if (known == 0)
-      dt_size_recv = recvtype->size();
-    extra->recv_size = recvcount * dt_size_recv;
-    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
-    retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
-
-    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
-{
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid() || op == MPI_OP_NULL) {
-    retval = MPI_ERR_ARG;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    int root_traced = comm->group()->index(root);
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_REDUCE;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = count * dt_size_send;
-    extra->root = root_traced;
-
-    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
-
-    simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
-
-    retval = MPI_SUCCESS;
-    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){
-  int retval = 0;
-
-  smpi_bench_end();
-  if (not datatype->is_valid() || op == MPI_OP_NULL) {
-    retval = MPI_ERR_ARG;
-  } else {
-    op->apply(inbuf, inoutbuf, &count, datatype);
-    retval = MPI_SUCCESS;
-  }
-  smpi_bench_begin();
-  return retval;
-}
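PMPI_Reduce_local above is the only reduction here that involves no communication: it simply applies the operation element-wise through op->apply(). A short usage sketch against the standard MPI C API (illustrative only):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int in[3]    = {1, 2, 3};
      int inout[3] = {10, 20, 30};
      // Purely local: inout[i] = in[i] + inout[i]; no message is exchanged.
      MPI_Reduce_local(in, inout, 3, MPI_INT, MPI_SUM);
      // inout is now {11, 22, 33} on every rank that executes this.
      MPI_Finalize();
      return 0;
    }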
-int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else {
-
-    char* sendtmpbuf = static_cast<char*>(sendbuf);
-    if( sendbuf == MPI_IN_PLACE ) {
-      sendtmpbuf = static_cast<char*>(xbt_malloc(count * datatype->get_extent()));
-      simgrid::smpi::Datatype::copy(recvbuf, count, datatype, sendtmpbuf, count, datatype);
-    }
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_ALLREDUCE;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = count * dt_size_send;
-
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
-
-    if( sendbuf == MPI_IN_PLACE )
-      xbt_free(sendtmpbuf);
-
-    retval = MPI_SUCCESS;
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_SCAN;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = count * dt_size_send;
-
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm){
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_EXSCAN;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = count * dt_size_send;
-    void* sendtmpbuf = sendbuf;
-    if (sendbuf == MPI_IN_PLACE) {
-      sendtmpbuf = static_cast<void*>(xbt_malloc(count * datatype->size()));
-      memcpy(sendtmpbuf, recvbuf, count * datatype->size());
-    }
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-    if (sendbuf == MPI_IN_PLACE)
-      xbt_free(sendtmpbuf);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
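The two prefix reductions handled above differ only in whether a rank's own contribution is included. A sketch showing the difference (illustrative only; per the MPI standard, the MPI_Exscan result on rank 0 is undefined):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int val = rank + 1, incl = 0, excl = 0;
      MPI_Scan(&val, &incl, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);   // sum over ranks 0..rank
      MPI_Exscan(&val, &excl, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); // sum over ranks 0..rank-1

      printf("rank %d: inclusive=%d exclusive=%d\n", rank, incl, excl);
      MPI_Finalize();
      return 0;
    }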
-int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
-  int retval = 0;
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else if (recvcounts == nullptr) {
-    retval = MPI_ERR_ARG;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    int i = 0;
-    int size = comm->size();
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_REDUCE_SCATTER;
-    extra->num_processes = size;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = 0;
-    extra->recvcounts = xbt_new(int, size);
-    int totalcount = 0;
-    for (i = 0; i < size; i++) { // copy data to avoid bad free
-      extra->recvcounts[i] = recvcounts[i] * dt_size_send;
-      totalcount += recvcounts[i];
-    }
-    void* sendtmpbuf = sendbuf;
-    if (sendbuf == MPI_IN_PLACE) {
-      sendtmpbuf = static_cast<void*>(xbt_malloc(totalcount * datatype->size()));
-      memcpy(sendtmpbuf, recvbuf, totalcount * datatype->size());
-    }
-
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
-    retval = MPI_SUCCESS;
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
-    if (sendbuf == MPI_IN_PLACE)
-      xbt_free(sendtmpbuf);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
-                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
-  int retval;
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else if (recvcount < 0) {
-    retval = MPI_ERR_ARG;
-  } else {
-    int count = comm->size();
-
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_REDUCE_SCATTER;
-    extra->num_processes = count;
-    int known = 0;
-    extra->datatype1 = encode_datatype(datatype, &known);
-    int dt_size_send = 1;
-    if (known == 0)
-      dt_size_send = datatype->size();
-    extra->send_size = 0;
-    extra->recvcounts = xbt_new(int, count);
-    for (int i = 0; i < count; i++) // copy data to avoid bad free
-      extra->recvcounts[i] = recvcount * dt_size_send;
-    void* sendtmpbuf = sendbuf;
-    if (sendbuf == MPI_IN_PLACE) {
-      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * count * datatype->size()));
-      memcpy(sendtmpbuf, recvbuf, recvcount * count * datatype->size());
-    }
-
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    int* recvcounts = static_cast<int*>(xbt_malloc(count * sizeof(int)));
-    for (int i = 0; i < count; i++)
-      recvcounts[i] = recvcount;
-    simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
-    xbt_free(recvcounts);
-    retval = MPI_SUCCESS;
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
-    if (sendbuf == MPI_IN_PLACE)
-      xbt_free(sendtmpbuf);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
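As the code above shows, PMPI_Reduce_scatter_block expands the single recvcount into a uniform recvcounts array and delegates to the same Colls::reduce_scatter. From the caller's side the two calls below are therefore equivalent (illustrative sketch, standard MPI C API):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      std::vector<int> send(2 * size, 1);
      std::vector<int> recv(2);
      std::vector<int> counts(size, 2);

      // Block variant: every rank receives exactly 2 reduced elements ...
      MPI_Reduce_scatter_block(send.data(), recv.data(), 2, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      // ... which matches the general variant with a uniform counts vector.
      MPI_Reduce_scatter(send.data(), recv.data(), counts.data(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);

      MPI_Finalize();
      return 0;
    }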
-int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
-                  MPI_Datatype recvtype, MPI_Comm comm)
-{
-  int retval = 0;
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
-    retval = MPI_ERR_TYPE;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_ALLTOALL;
-
-    void* sendtmpbuf = static_cast<char*>(sendbuf);
-    int sendtmpcount = sendcount;
-    MPI_Datatype sendtmptype = sendtype;
-    if (sendbuf == MPI_IN_PLACE) {
-      sendtmpbuf = static_cast<void*>(xbt_malloc(recvcount * comm->size() * recvtype->size()));
-      memcpy(sendtmpbuf, recvbuf, recvcount * comm->size() * recvtype->size());
-      sendtmpcount = recvcount;
-      sendtmptype = recvtype;
-    }
-
-    int known = 0;
-    extra->datatype1 = encode_datatype(sendtmptype, &known);
-    if (known == 0)
-      extra->send_size = sendtmpcount * sendtmptype->size();
-    else
-      extra->send_size = sendtmpcount;
-    extra->datatype2 = encode_datatype(recvtype, &known);
-    if (known == 0)
-      extra->recv_size = recvcount * recvtype->size();
-    else
-      extra->recv_size = recvcount;
-
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-
-    retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
-    if (sendbuf == MPI_IN_PLACE)
-      xbt_free(sendtmpbuf);
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype sendtype, void* recvbuf,
-                   int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
-{
-  int retval = 0;
-
-  smpi_bench_end();
-
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  } else if (sendtype == MPI_DATATYPE_NULL || recvtype == MPI_DATATYPE_NULL) {
-    retval = MPI_ERR_TYPE;
-  } else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
-             recvdisps == nullptr) {
-    retval = MPI_ERR_ARG;
-  } else {
-    int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
-    int i = 0;
-    int size = comm->size();
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
-    extra->type = TRACING_ALLTOALLV;
-    extra->send_size = 0;
-    extra->recv_size = 0;
-    extra->recvcounts = xbt_new(int, size);
-    extra->sendcounts = xbt_new(int, size);
-    int known = 0;
-    extra->datatype2 = encode_datatype(recvtype, &known);
-    int dt_size_recv = recvtype->size();
-
-    void* sendtmpbuf = static_cast<char*>(sendbuf);
-    int* sendtmpcounts = sendcounts;
-    int* sendtmpdisps = senddisps;
-    MPI_Datatype sendtmptype = sendtype;
-    int maxsize = 0;
-    for (i = 0; i < size; i++) { // copy data to avoid bad free
-      extra->recv_size += recvcounts[i] * dt_size_recv;
-      extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
-      if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize)
-        maxsize = (recvdisps[i] + recvcounts[i]) * dt_size_recv;
-    }
-
-    if (sendbuf == MPI_IN_PLACE) {
-      sendtmpbuf = static_cast<void*>(xbt_malloc(maxsize));
-      memcpy(sendtmpbuf, recvbuf, maxsize);
-      sendtmpcounts = static_cast<int*>(xbt_malloc(size * sizeof(int)));
-      memcpy(sendtmpcounts, recvcounts, size * sizeof(int));
-      sendtmpdisps = static_cast<int*>(xbt_malloc(size * sizeof(int)));
-      memcpy(sendtmpdisps, recvdisps, size * sizeof(int));
-      sendtmptype = recvtype;
-    }
-
-    extra->datatype1 = encode_datatype(sendtmptype, &known);
-    int dt_size_send = sendtmptype->size();
-
-    for (i = 0; i < size; i++) { // copy data to avoid bad free
-      extra->send_size += sendtmpcounts[i] * dt_size_send;
-      extra->sendcounts[i] = sendtmpcounts[i] * dt_size_send;
-    }
-    extra->num_processes = size;
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
-    retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
-                                             recvdisps, recvtype, comm);
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-
-    if (sendbuf == MPI_IN_PLACE) {
-      xbt_free(sendtmpbuf);
-      xbt_free(sendtmpcounts);
-      xbt_free(sendtmpdisps);
-    }
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
-
-
-int PMPI_Get_processor_name(char *name, int *resultlen)
-{
-  strncpy(name, sg_host_self()->getCname(), strlen(sg_host_self()->getCname()) < MPI_MAX_PROCESSOR_NAME - 1
-                                                ? strlen(sg_host_self()->getCname()) + 1
-                                                : MPI_MAX_PROCESSOR_NAME - 1);
-  *resultlen = strlen(name) > MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
-
-  return MPI_SUCCESS;
-}
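Under SMPI, the processor name returned above is the simulated host's name (sg_host_self()->getCname()), truncated to MPI_MAX_PROCESSOR_NAME. Minimal usage sketch (illustrative only):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      char name[MPI_MAX_PROCESSOR_NAME];
      int len = 0;
      MPI_Get_processor_name(name, &len); // under SMPI: the simulated host name
      printf("running on %s (%d chars)\n", name, len);
      MPI_Finalize();
      return 0;
    }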
-int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
-{
-  if (status == nullptr || count == nullptr) {
-    return MPI_ERR_ARG;
-  } else if (not datatype->is_valid()) {
-    return MPI_ERR_TYPE;
-  } else {
-    size_t size = datatype->size();
-    if (size == 0) {
-      *count = 0;
-      return MPI_SUCCESS;
-    } else if (status->count % size != 0) {
-      return MPI_UNDEFINED;
-    } else {
-      *count = simgrid::smpi::Status::get_count(status, datatype);
-      return MPI_SUCCESS;
-    }
-  }
-}
-
-int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type);
-  }
-}
-
-int PMPI_Type_commit(MPI_Datatype* datatype) {
-  if (datatype == nullptr || *datatype == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else {
-    (*datatype)->commit();
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0 || blocklen < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type);
-  }
-}
-
-int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0 || blocklen < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
-  }
-}
-
-int PMPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  return MPI_Type_hvector(count, blocklen, stride, old_type, new_type);
-}
-
-int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
-  }
-}
-
-int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
-  }
-}
-
-int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI_Datatype old_type,
-                                   MPI_Datatype* new_type)
-{
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    int* blocklens = static_cast<int*>(xbt_malloc(blocklength * count * sizeof(int)));
-    for (int i = 0; i < count; i++)
-      blocklens[i] = blocklength;
-    int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
-    xbt_free(blocklens);
-    return retval;
-  }
-}
-
-int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
-{
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
-  }
-}
-
-int PMPI_Type_create_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type,
-                              MPI_Datatype* new_type) {
-  return PMPI_Type_hindexed(count, blocklens, indices, old_type, new_type);
-}
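The datatype constructors bound above all funnel into simgrid::smpi::Datatype factory methods. A typical use of the vector constructor, extracting a column of a row-major matrix (standard MPI C API, illustrative only):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      // One column of a 4x4 row-major int matrix: 4 blocks of 1 int, stride 4.
      MPI_Datatype column;
      MPI_Type_vector(4, 1, 4, MPI_INT, &column);
      MPI_Type_commit(&column); // commit() must run before the type is used
      // ... usable in MPI_Send / MPI_Recv / collectives ...
      MPI_Type_free(&column);
      MPI_Finalize();
      return 0;
    }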
-int PMPI_Type_create_hindexed_block(int count, int blocklength, MPI_Aint* indices, MPI_Datatype old_type,
-                                    MPI_Datatype* new_type) {
-  if (old_type == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    int* blocklens = (int*)xbt_malloc(blocklength * count * sizeof(int));
-    for (int i = 0; i < count; i++)
-      blocklens[i] = blocklength;
-    int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
-    xbt_free(blocklens);
-    return retval;
-  }
-}
-
-int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type) {
-  if (count < 0){
-    return MPI_ERR_COUNT;
-  } else {
-    return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type);
-  }
-}
-
-int PMPI_Type_create_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types,
-                            MPI_Datatype* new_type) {
-  return PMPI_Type_struct(count, blocklens, indices, old_types, new_type);
-}
-
-int PMPI_Error_class(int errorcode, int* errorclass) {
-  // assume smpi uses only standard mpi error codes
-  *errorclass = errorcode;
-  return MPI_SUCCESS;
-}
-
-int PMPI_Initialized(int* flag) {
-  *flag = (smpi_process() != nullptr && smpi_process()->initialized());
-  return MPI_SUCCESS;
-}
-
-/* The topo part of MPI_COMM_WORLD should always be nullptr. When other topologies will be implemented, not only should we
- * check if the topology is nullptr, but we should check if it is the good topology type (so we have to add a
- * MPIR_Topo_Type field, and replace the MPI_Topology field by an union)*/
-
-int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
-  if (comm_old == MPI_COMM_NULL){
-    return MPI_ERR_COMM;
-  } else if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr) {
-    return MPI_ERR_ARG;
-  } else{
-    simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
-    if (*comm_cart == MPI_COMM_NULL)
-      delete topo;
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
-  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (coords == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  return topo->rank(coords, rank);
-}
-
-int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
-  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (source == nullptr || dest == nullptr || direction < 0 ) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  return topo->shift(direction, displ, source, dest);
-}
-
-int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
-  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (rank < 0 || rank >= comm->size()) {
-    return MPI_ERR_RANK;
-  }
-  if (maxdims <= 0) {
-    return MPI_ERR_ARG;
-  }
-  if (coords == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  return topo->coords(rank, maxdims, coords);
-}
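The Cartesian topology calls above delegate to the Topo_Cart class behind the MPIR_Cart_Topology handle. A typical construction and neighbour query (illustrative sketch, standard MPI C API):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int dims[2] = {0, 0};
      MPI_Dims_create(size, 2, dims);            // factor size into a 2-D grid
      int periods[2] = {1, 1};
      MPI_Comm cart;
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);

      int left, right;
      MPI_Cart_shift(cart, 0, 1, &left, &right); // neighbours along dimension 0

      MPI_Comm_free(&cart);
      MPI_Finalize();
      return 0;
    }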
-int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
-  if (comm == nullptr || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (maxdims <= 0 || dims == nullptr || periods == nullptr || coords == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  return topo->get(maxdims, dims, periods, coords);
-}
-
-int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
-  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (ndims == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  return topo->dim_get(ndims);
-}
-
-int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
-  if (dims == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  if (ndims < 1 || nnodes < 1) {
-    return MPI_ERR_DIMS;
-  }
-  return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims);
-}
-
-int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
-  if (comm == MPI_COMM_NULL || comm->topo() == nullptr) {
-    return MPI_ERR_TOPOLOGY;
-  }
-  if (comm_new == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo());
-  if (topo == nullptr) {
-    return MPI_ERR_ARG;
-  }
-  MPIR_Cart_Topology cart = topo->sub(remain_dims, comm_new);
-  if (*comm_new == MPI_COMM_NULL)
-    delete cart;
-  if (cart == nullptr)
-    return MPI_ERR_ARG;
-  return MPI_SUCCESS;
-}
-
-int PMPI_Type_create_resized(MPI_Datatype oldtype, MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
-  if (oldtype == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  }
-  int blocks[3] = {1, 1, 1};
-  MPI_Aint disps[3] = {lb, 0, lb + extent};
-  MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
-
-  *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
-
-  (*newtype)->addflag(~DT_FLAG_COMMITED);
-  return MPI_SUCCESS;
-}
-
-int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
-  int retval = 0;
-  smpi_bench_end();
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  }else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){
-    retval = MPI_ERR_OTHER;
-  }else{
-    *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm);
-    retval = MPI_SUCCESS;
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
-  int retval = 0;
-  smpi_bench_end();
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  }else if (disp_unit <= 0 || size < 0 ){
-    retval = MPI_ERR_OTHER;
-  }else{
-    void* ptr = xbt_malloc(size);
-    if (ptr == nullptr)
-      return MPI_ERR_NO_MEM;
-    *static_cast<void**>(base) = ptr;
-    *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm, 1);
-    retval = MPI_SUCCESS;
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
-  int retval = 0;
-  smpi_bench_end();
-  if (comm == MPI_COMM_NULL) {
-    retval = MPI_ERR_COMM;
-  }else{
-    *win = new simgrid::smpi::Win(info, comm);
-    retval = MPI_SUCCESS;
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL){
-    retval = MPI_ERR_WIN;
-  } else if ((base == nullptr && size != 0) || size < 0 ){
-    retval = MPI_ERR_OTHER;
-  }else{
-    retval = win->attach(base, size);
-  }
-  smpi_bench_begin();
-  return retval;
-}
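The window-creation paths above all map onto the same Win class: MPI_Win_create wraps user memory, MPI_Win_allocate additionally mallocs the buffer (note the extra argument 1 passed to the constructor to mark ownership), and dynamic windows start empty and grow via attach. A caller-side sketch (illustrative only, standard MPI C API):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int* base = nullptr;
      MPI_Win win;
      // Allocate 1024 ints and expose them in one call; the binding above
      // performs the malloc and marks the window as owning the memory.
      MPI_Win_allocate(1024 * sizeof(int), sizeof(int), MPI_INFO_NULL,
                       MPI_COMM_WORLD, &base, &win);
      MPI_Win_free(&win); // frees the window and the allocated buffer
      MPI_Finalize();
      return 0;
    }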
-int PMPI_Win_detach(MPI_Win win, void *base){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL){
-    retval = MPI_ERR_WIN;
-  } else if (base == nullptr){
-    retval = MPI_ERR_OTHER;
-  }else{
-    retval = win->detach(base);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-
-int PMPI_Win_free( MPI_Win* win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == nullptr || *win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  }else{
-    delete *win;
-    retval = MPI_SUCCESS;
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_set_name(MPI_Win win, char * name)
-{
-  if (win == MPI_WIN_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (name == nullptr) {
-    return MPI_ERR_ARG;
-  } else {
-    win->set_name(name);
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Win_get_name(MPI_Win win, char * name, int* len)
-{
-  if (win == MPI_WIN_NULL) {
-    return MPI_ERR_WIN;
-  } else if (name == nullptr) {
-    return MPI_ERR_ARG;
-  } else {
-    win->get_name(name, len);
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Win_get_info(MPI_Win win, MPI_Info* info)
-{
-  if (win == MPI_WIN_NULL) {
-    return MPI_ERR_WIN;
-  } else {
-    *info = win->info();
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Win_set_info(MPI_Win win, MPI_Info info)
-{
-  if (win == MPI_WIN_NULL) {
-    return MPI_ERR_TYPE;
-  } else {
-    win->set_info(info);
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){
-  if (win == MPI_WIN_NULL) {
-    return MPI_ERR_WIN;
-  }else {
-    win->get_group(group);
-    (*group)->ref();
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Win_fence( int assert, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int rank = smpi_process()->index();
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-    retval = win->fence(assert);
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                       target_datatype);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
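PMPI_Get above is only valid inside an RMA synchronization epoch; the simplest is a pair of fences. A ring example where each rank reads its neighbour's value (illustrative sketch, standard MPI C API):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int mine = rank, theirs = -1;
      MPI_Win win;
      MPI_Win_create(&mine, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

      MPI_Win_fence(0, win);                      // open the access epoch
      int target = (rank + 1) % size;
      MPI_Get(&theirs, 1, MPI_INT, target, 0, 1, MPI_INT, win);
      MPI_Win_fence(0, win);                      // close it; 'theirs' is now valid

      MPI_Win_free(&win);
      MPI_Finalize();
      return 0;
    }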
-int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    *request = MPI_REQUEST_NULL;
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (request == nullptr){
-    retval = MPI_ERR_REQUEST;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                       target_datatype, request);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int dst_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
-    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
-
-    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                       target_datatype);
-
-    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    *request = MPI_REQUEST_NULL;
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (request == nullptr){
-    retval = MPI_ERR_REQUEST;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int dst_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
-    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
-
-    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                       target_datatype, request);
-
-    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-                     MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                              target_datatype, op);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-                      MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    *request = MPI_REQUEST_NULL;
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((not origin_datatype->is_valid()) || (not target_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else if (request == nullptr){
-    retval = MPI_ERR_REQUEST;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
-                              target_datatype, op, request);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0 || result_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0 && op != MPI_NO_OP) ||
-             (result_addr == nullptr && result_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
-             (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
-                                  result_count, result_datatype, target_rank, target_disp,
-                                  target_count, target_datatype, op);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-
-int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
-int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
-MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    *request = MPI_REQUEST_NULL;
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if ((origin_count < 0 || target_count < 0 || result_count < 0) ||
-             (origin_addr == nullptr && origin_count > 0 && op != MPI_NO_OP) ||
-             (result_addr == nullptr && result_count > 0)){
-    retval = MPI_ERR_COUNT;
-  } else if ((origin_datatype != MPI_DATATYPE_NULL && not origin_datatype->is_valid()) ||
-             (not target_datatype->is_valid()) || (not result_datatype->is_valid())) {
-    retval = MPI_ERR_TYPE;
-  } else if (op == MPI_OP_NULL) {
-    retval = MPI_ERR_OP;
-  } else if (request == nullptr){
-    retval = MPI_ERR_REQUEST;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
-                                  result_count, result_datatype, target_rank, target_disp,
-                                  target_count, target_datatype, op, request);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
-  return PMPI_Get_accumulate(origin_addr, origin_addr == nullptr ? 0 : 1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win);
-}
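As the forwarding above shows, PMPI_Fetch_and_op is just a one-element PMPI_Get_accumulate. The classic use is an atomic fetch-and-add on a shared counter under a passive-target lock (illustrative sketch, standard MPI C API):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int counter = 0, old = 0, one = 1;
      MPI_Win win;
      MPI_Win_create(&counter, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
      // Atomic fetch-and-add on rank 0's counter; internally a 1-element Get_accumulate.
      MPI_Fetch_and_op(&one, &old, MPI_INT, 0, 0, MPI_SUM, win);
      MPI_Win_unlock(0, win);

      MPI_Win_free(&win);
      MPI_Finalize();
      return 0;
    }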
-int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
-                          void *result_addr, MPI_Datatype datatype, int target_rank,
-                          MPI_Aint target_disp, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (target_rank == MPI_PROC_NULL) {
-    retval = MPI_SUCCESS;
-  } else if (target_rank < 0){
-    retval = MPI_ERR_RANK;
-  } else if (win->dynamic() == 0 && target_disp < 0){
-    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
-    retval = MPI_ERR_ARG;
-  } else if (origin_addr == nullptr || result_addr == nullptr || compare_addr == nullptr){
-    retval = MPI_ERR_COUNT;
-  } else if (not datatype->is_valid()) {
-    retval = MPI_ERR_TYPE;
-  } else {
-    int rank = smpi_process()->index();
-    MPI_Group group;
-    win->get_group(&group);
-    int src_traced = group->index(target_rank);
-    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
-
-    retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype,
-                                    target_rank, target_disp);
-
-    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (group == MPI_GROUP_NULL){
-    retval = MPI_ERR_GROUP;
-  } else {
-    int rank = smpi_process()->index();
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-    retval = win->post(group,assert);
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (group == MPI_GROUP_NULL){
-    retval = MPI_ERR_GROUP;
-  } else {
-    int rank = smpi_process()->index();
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-    retval = win->start(group,assert);
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_complete(MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int rank = smpi_process()->index();
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-
-    retval = win->complete();
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_wait(MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int rank = smpi_process()->index();
-    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
-
-    retval = win->wait();
-
-    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (lock_type != MPI_LOCK_EXCLUSIVE &&
-             lock_type != MPI_LOCK_SHARED) {
-    retval = MPI_ERR_LOCKTYPE;
-  } else if (rank == MPI_PROC_NULL){
-    retval = MPI_SUCCESS;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->lock(lock_type,rank,assert);
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_unlock(int rank, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (rank == MPI_PROC_NULL){
-    retval = MPI_SUCCESS;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->unlock(rank);
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
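Win_post/Win_start/Win_complete/Win_wait above implement generalized active-target (PSCW) synchronization. A toy sketch where every rank both exposes and accesses, using the full group on both sides (illustrative only; real codes usually pass disjoint origin and target groups):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int buf = 0;
      MPI_Win win;
      MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

      MPI_Group world_group;
      MPI_Comm_group(MPI_COMM_WORLD, &world_group);

      MPI_Win_post(world_group, 0, win);  // open the exposure epoch
      MPI_Win_start(world_group, 0, win); // open the access epoch
      // ... MPI_Put / MPI_Get calls would go here ...
      MPI_Win_complete(win);              // close the access epoch
      MPI_Win_wait(win);                  // close the exposure epoch

      MPI_Group_free(&world_group);
      MPI_Win_free(&win);
      MPI_Finalize();
      return 0;
    }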
-int PMPI_Win_lock_all(int assert, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->lock_all(assert);
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_unlock_all(MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->unlock_all();
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_flush(int rank, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (rank == MPI_PROC_NULL){
-    retval = MPI_SUCCESS;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->flush(rank);
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_flush_local(int rank, MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else if (rank == MPI_PROC_NULL){
-    retval = MPI_SUCCESS;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->flush_local(rank);
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_flush_all(MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->flush_all();
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Win_flush_local_all(MPI_Win win){
-  int retval = 0;
-  smpi_bench_end();
-  if (win == MPI_WIN_NULL) {
-    retval = MPI_ERR_WIN;
-  } else {
-    int myrank = smpi_process()->index();
-    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
-    retval = win->flush_local_all();
-    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
-  }
-  smpi_bench_begin();
-  return retval;
-}
-
-int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
-  void *ptr = xbt_malloc(size);
-  if (ptr == nullptr)
-    return MPI_ERR_NO_MEM;
-  else {
-    *static_cast<void**>(baseptr) = ptr;
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Free_mem(void *baseptr){
-  xbt_free(baseptr);
-  return MPI_SUCCESS;
-}
-
-int PMPI_Type_set_name(MPI_Datatype datatype, char * name)
-{
-  if (datatype == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (name == nullptr) {
-    return MPI_ERR_ARG;
-  } else {
-    datatype->set_name(name);
-    return MPI_SUCCESS;
-  }
-}
-
-int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len)
-{
-  if (datatype == MPI_DATATYPE_NULL) {
-    return MPI_ERR_TYPE;
-  } else if (name == nullptr) {
-    return MPI_ERR_ARG;
-  } else {
-    datatype->get_name(name, len);
-    return MPI_SUCCESS;
-  }
-}
-
-MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){
-  return static_cast<MPI_Datatype>(simgrid::smpi::F2C::f2c(datatype));
-}
-
-MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){
-  return datatype->c2f();
-}
-
-MPI_Group PMPI_Group_f2c(MPI_Fint group){
-  return simgrid::smpi::Group::f2c(group);
-}
-MPI_Fint PMPI_Group_c2f(MPI_Group group){
-  return group->c2f();
-}
-
-MPI_Request PMPI_Request_f2c(MPI_Fint request){
-  return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
-}
-
-MPI_Fint PMPI_Request_c2f(MPI_Request request) {
-  return request->c2f();
-}
-
-MPI_Win PMPI_Win_f2c(MPI_Fint win){
-  return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
-}
-
-MPI_Fint PMPI_Win_c2f(MPI_Win win){
-  return win->c2f();
-}
-
-MPI_Op PMPI_Op_f2c(MPI_Fint op){
-  return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
-}
-
-MPI_Fint PMPI_Op_c2f(MPI_Op op){
-  return op->c2f();
-}
-
-MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
-  return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
-}
-
-MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
-  return comm->c2f();
-}
-
-MPI_Info PMPI_Info_f2c(MPI_Fint info){
-  return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
-}
-
-MPI_Fint PMPI_Info_c2f(MPI_Info info){
-  return info->c2f();
-}
-
-int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
-  smpi_copy_fn _copy_fn = {copy_fn, nullptr, nullptr};
-  smpi_delete_fn _delete_fn = {delete_fn, nullptr, nullptr};
-  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
-}
-
-int PMPI_Keyval_free(int* keyval) {
-  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
-}
-
-int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
-  if(keyval == MPI_TAG_UB || keyval == MPI_HOST || keyval == MPI_IO || keyval == MPI_WTIME_IS_GLOBAL || keyval == MPI_APPNUM
-     || keyval == MPI_UNIVERSE_SIZE || keyval == MPI_LASTUSEDCODE)
-    return MPI_ERR_ARG;
-  else if (comm == MPI_COMM_NULL)
-    return MPI_ERR_COMM;
-  else
-    return comm->attr_delete<simgrid::smpi::Comm>(keyval);
-}
-
-int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
-  static int one = 1;
-  static int zero = 0;
-  static int tag_ub = INT_MAX;
-  static int last_used_code = MPI_ERR_LASTCODE;
-
-  if (comm == MPI_COMM_NULL){
-    *flag = 0;
-    return MPI_ERR_COMM;
-  }
-
-  switch (keyval) {
-  case MPI_HOST:
-  case MPI_IO:
-  case MPI_APPNUM:
-    *flag = 1;
-    *static_cast<void**>(attr_value) = &zero;
-    return MPI_SUCCESS;
-  case MPI_UNIVERSE_SIZE:
-    *flag = 1;
-    *static_cast<void**>(attr_value) = &smpi_universe_size;
-    return MPI_SUCCESS;
-  case MPI_LASTUSEDCODE:
-    *flag = 1;
-    *static_cast<void**>(attr_value) = &last_used_code;
-    return MPI_SUCCESS;
-  case MPI_TAG_UB:
-    *flag = 1;
-    *static_cast<void**>(attr_value) = &tag_ub;
-    return MPI_SUCCESS;
-  case MPI_WTIME_IS_GLOBAL:
-    *flag = 1;
-    *static_cast<void**>(attr_value) = &one;
-    return MPI_SUCCESS;
-  default:
-    return comm->attr_get<simgrid::smpi::Comm>(keyval, attr_value, flag);
-  }
-}
-
-int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
-  if(keyval == MPI_TAG_UB || keyval == MPI_HOST || keyval == MPI_IO || keyval == MPI_WTIME_IS_GLOBAL || keyval == MPI_APPNUM
-     || keyval == MPI_UNIVERSE_SIZE || keyval == MPI_LASTUSEDCODE)
-    return MPI_ERR_ARG;
-  else if (comm == MPI_COMM_NULL)
-    return MPI_ERR_COMM;
-  else
-    return comm->attr_put<simgrid::smpi::Comm>(keyval, attr_value);
-}
-
-int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
-{
-  return PMPI_Attr_get(comm, comm_keyval, attribute_val, flag);
-}
-
-int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
-{
-  return PMPI_Attr_put(comm, comm_keyval, attribute_val);
-}
-
-int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
-{
-  return PMPI_Attr_delete(comm, comm_keyval);
-}
-
-int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
-                            void* extra_state)
-{
-  return PMPI_Keyval_create(copy_fn, delete_fn, keyval, extra_state);
-}
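The keyval machinery above stores one of three copy/delete callback flavours (comm, type, win) in the smpi_copy_fn/smpi_delete_fn structs and dispatches through the templated Keyval class. Caller-side use of the communicator flavour (illustrative sketch, standard MPI C API):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int keyval;
      MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &keyval, nullptr);

      static int payload = 42;
      MPI_Comm_set_attr(MPI_COMM_WORLD, keyval, &payload);

      int* got = nullptr;
      int flag = 0;
      MPI_Comm_get_attr(MPI_COMM_WORLD, keyval, &got, &flag);
      if (flag)
        printf("attribute value: %d\n", *got);

      MPI_Comm_delete_attr(MPI_COMM_WORLD, keyval);
      MPI_Comm_free_keyval(&keyval);
      MPI_Finalize();
      return 0;
    }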
-int PMPI_Comm_free_keyval(int* keyval) {
-  return PMPI_Keyval_free(keyval);
-}
-
-int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
-{
-  if (type == MPI_DATATYPE_NULL)
-    return MPI_ERR_TYPE;
-  else
-    return type->attr_get<simgrid::smpi::Datatype>(type_keyval, attribute_val, flag);
-}
-
-int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
-{
-  if (type == MPI_DATATYPE_NULL)
-    return MPI_ERR_TYPE;
-  else
-    return type->attr_put<simgrid::smpi::Datatype>(type_keyval, attribute_val);
-}
-
-int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
-{
-  if (type == MPI_DATATYPE_NULL)
-    return MPI_ERR_TYPE;
-  else
-    return type->attr_delete<simgrid::smpi::Datatype>(type_keyval);
-}
-
-int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval,
-                            void* extra_state)
-{
-  smpi_copy_fn _copy_fn = {nullptr, copy_fn, nullptr};
-  smpi_delete_fn _delete_fn = {nullptr, delete_fn, nullptr};
-  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Datatype>(_copy_fn, _delete_fn, keyval, extra_state);
-}
-
-int PMPI_Type_free_keyval(int* keyval) {
-  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Datatype>(keyval);
-}
-
-int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag)
-{
-  static MPI_Aint size;
-  static int disp_unit;
-  if (win == MPI_WIN_NULL)
-    return MPI_ERR_TYPE;
-  else{
-  switch (keyval) {
-    case MPI_WIN_BASE :
-      *static_cast<void**>(attribute_val) = win->base();
-      *flag = 1;
-      return MPI_SUCCESS;
-    case MPI_WIN_SIZE :
-      size = win->size();
-      *static_cast<MPI_Aint**>(attribute_val) = &size;
-      *flag = 1;
-      return MPI_SUCCESS;
-    case MPI_WIN_DISP_UNIT :
-      disp_unit = win->disp_unit();
-      *static_cast<int**>(attribute_val) = &disp_unit;
-      *flag = 1;
-      return MPI_SUCCESS;
-    default:
-      return win->attr_get<simgrid::smpi::Win>(keyval, attribute_val, flag);
-  }
-}
-
-}
-
-int PMPI_Win_set_attr (MPI_Win win, int type_keyval, void *attribute_val)
-{
-  if (win == MPI_WIN_NULL)
-    return MPI_ERR_TYPE;
-  else
-    return win->attr_put<simgrid::smpi::Win>(type_keyval, attribute_val);
-}
-
-int PMPI_Win_delete_attr (MPI_Win win, int type_keyval)
-{
-  if (win == MPI_WIN_NULL)
-    return MPI_ERR_TYPE;
-  else
-    return win->attr_delete<simgrid::smpi::Win>(type_keyval);
-}
-
-int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval,
-                           void* extra_state)
-{
-  smpi_copy_fn _copy_fn = {nullptr, nullptr, copy_fn};
-  smpi_delete_fn _delete_fn = {nullptr, nullptr, delete_fn};
-  return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Win>(_copy_fn, _delete_fn, keyval, extra_state);
-}
-
-int PMPI_Win_free_keyval(int* keyval) {
-  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Win>(keyval);
-}
-
-int PMPI_Info_create( MPI_Info *info){
-  if (info == nullptr)
-    return MPI_ERR_ARG;
-  *info = new simgrid::smpi::Info();
-  return MPI_SUCCESS;
-}
-
-int PMPI_Info_set( MPI_Info info, char *key, char *value){
-  if (info == nullptr || key == nullptr || value == nullptr)
-    return MPI_ERR_ARG;
-  info->set(key, value);
-  return MPI_SUCCESS;
-}
-
-int PMPI_Info_free( MPI_Info *info){
-  if (info == nullptr || *info == nullptr)
-    return MPI_ERR_ARG;
-  simgrid::smpi::Info::unref(*info);
-  *info = MPI_INFO_NULL;
-  return MPI_SUCCESS;
-}
-
-int PMPI_Info_get(MPI_Info info, char *key, int valuelen, char *value, int *flag){
-  *flag = false;
-  if (info == nullptr || key == nullptr || valuelen < 0)
-    return MPI_ERR_ARG;
-  if (value == nullptr)
-    return MPI_ERR_INFO_VALUE;
-  return info->get(key, valuelen, value, flag);
-}
-
-int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
-  if (info == nullptr || newinfo == nullptr)
-    return MPI_ERR_ARG;
-  *newinfo = new simgrid::smpi::Info(info);
-  return MPI_SUCCESS;
-}
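The Info bindings above are thin wrappers around the simgrid::smpi::Info dictionary. Basic round-trip usage (illustrative only; written against the pre-MPI-3 char* signatures used in this file):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      MPI_Info info;
      MPI_Info_create(&info);

      char key[] = "no_locks"; // plain char arrays: these signatures take char*
      char val[] = "true";
      MPI_Info_set(info, key, val);

      char out[MPI_MAX_INFO_VAL];
      int flag = 0;
      MPI_Info_get(info, key, MPI_MAX_INFO_VAL - 1, out, &flag);
      if (flag)
        printf("%s = %s\n", key, out);

      MPI_Info_free(&info);
      MPI_Finalize();
      return 0;
    }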
-int PMPI_Info_delete(MPI_Info info, char *key){
-  if (info == nullptr || key == nullptr)
-    return MPI_ERR_ARG;
-  return info->remove(key);
-}
-
-int PMPI_Info_get_nkeys( MPI_Info info, int *nkeys){
-  if (info == nullptr || nkeys == nullptr)
-    return MPI_ERR_ARG;
-  return info->get_nkeys(nkeys);
-}
-
-int PMPI_Info_get_nthkey( MPI_Info info, int n, char *key){
-  if (info == nullptr || key == nullptr || n < 0 || n > MPI_MAX_INFO_KEY)
-    return MPI_ERR_ARG;
-  return info->get_nthkey(n, key);
-}
-
-int PMPI_Info_get_valuelen( MPI_Info info, char *key, int *valuelen, int *flag){
-  *flag = false;
-  if (info == nullptr || key == nullptr || valuelen == nullptr)
-    return MPI_ERR_ARG;
-  return info->get_valuelen(key, valuelen, flag);
-}
-
-int PMPI_Unpack(void* inbuf, int incount, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
-  if(incount < 0 || outcount < 0 || inbuf == nullptr || outbuf == nullptr)
-    return MPI_ERR_ARG;
-  if (not type->is_valid())
-    return MPI_ERR_TYPE;
-  if(comm == MPI_COMM_NULL)
-    return MPI_ERR_COMM;
-  return type->unpack(inbuf, incount, position, outbuf, outcount, comm);
-}
-
-int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
-  if(incount < 0 || outcount < 0 || inbuf == nullptr || outbuf == nullptr)
-    return MPI_ERR_ARG;
-  if (not type->is_valid())
-    return MPI_ERR_TYPE;
-  if(comm == MPI_COMM_NULL)
-    return MPI_ERR_COMM;
-  return type->pack(inbuf, incount, outbuf, outcount, position, comm);
-}
-
-int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
-  if(incount < 0)
-    return MPI_ERR_ARG;
-  if (not datatype->is_valid())
-    return MPI_ERR_TYPE;
-  if(comm == MPI_COMM_NULL)
-    return MPI_ERR_COMM;
-
-  *size = incount * datatype->size();
-
-  return MPI_SUCCESS;
-}
-
-} // extern "C"
diff --git a/teshsuite/smpi/mpich3-test/f77/rma/CMakeLists.txt b/teshsuite/smpi/mpich3-test/f77/rma/CMakeLists.txt
index 5231aa57a9..afd0b85970 100644
--- a/teshsuite/smpi/mpich3-test/f77/rma/CMakeLists.txt
+++ b/teshsuite/smpi/mpich3-test/f77/rma/CMakeLists.txt
@@ -12,7 +12,7 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite AND SMPI_FORTRAN)
   add_executable(winaccf winaccf.f)
 # add_executable(winerrf winerrf.f)
   add_executable(winfencef winfencef.f)
-# add_executable(wingroupf wingroupf.f)
+  add_executable(wingroupf wingroupf.f)
 # add_executable(baseattrwinf baseattrwinf.f)
 # add_executable(winattr2f winattr2f.f)
 # add_executable(winattrf winattrf.f)
@@ -25,7 +25,7 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite AND SMPI_FORTRAN)
   target_link_libraries(winaccf simgrid mtest_f77)
 #target_link_libraries(winerrf simgrid mtest_f77)
   target_link_libraries(winfencef simgrid mtest_f77)
-#target_link_libraries(wingroupf simgrid mtest_f77)
+target_link_libraries(wingroupf simgrid mtest_f77)
 #target_link_libraries(baseattrwinf simgrid mtest_f77)
   target_link_libraries(c2f2cwinf simgrid mtest_f77)
 #target_link_libraries(winattr2f simgrid mtest_f77)
@@ -53,6 +53,7 @@ set(examples_src
   ${CMAKE_CURRENT_SOURCE_DIR}/winscale1f.f
   ${CMAKE_CURRENT_SOURCE_DIR}/winscale2f.f
   ${CMAKE_CURRENT_SOURCE_DIR}/addsize.h
+  ${CMAKE_CURRENT_SOURCE_DIR}/attraints.h
   PARENT_SCOPE)
 set(txt_files
   ${txt_files}
diff --git a/teshsuite/smpi/mpich3-test/f77/rma/attraints.h b/teshsuite/smpi/mpich3-test/f77/rma/attraints.h
new file mode 100644
index 0000000000..fb0747f683
--- /dev/null
+++ b/teshsuite/smpi/mpich3-test/f77/rma/attraints.h
@@ -0,0 +1,6 @@
+C -*- Mode: Fortran; -*-
+C
+C (C) 2003 by Argonne National Laboratory.
+C     See COPYRIGHT in top-level directory.
+C
+      integer extrastate, valin, valout, val
diff --git a/teshsuite/smpi/mpich3-test/f77/rma/testlist b/teshsuite/smpi/mpich3-test/f77/rma/testlist
index f3cf47b553..9141374e16 100644
--- a/teshsuite/smpi/mpich3-test/f77/rma/testlist
+++ b/teshsuite/smpi/mpich3-test/f77/rma/testlist
@@ -6,12 +6,9 @@ wingetf 5 needs_privatization=1
 #Needs win error handling
 #winerrf 1
 winnamef 1
-#Needs win get group
-#wingroupf 4
+wingroupf 4 needs_privatization=1
 winaccf 4 needs_privatization=1
-#Needs mpi_win_f2c
 c2f2cwinf 1
-#Needs attr
 #baseattrwinf 1
 #winattrf 1
 #winattr2f 1
diff --git a/teshsuite/smpi/mpich3-test/f90/rma/CMakeLists.txt b/teshsuite/smpi/mpich3-test/f90/rma/CMakeLists.txt
index 2572c4bb42..ba8a0d3276 100644
--- a/teshsuite/smpi/mpich3-test/f90/rma/CMakeLists.txt
+++ b/teshsuite/smpi/mpich3-test/f90/rma/CMakeLists.txt
@@ -12,7 +12,7 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite AND SMPI_FORTRAN)
   add_executable(winaccf90 winaccf90.f90)
 # add_executable(winerrf90 winerrf90.f90)
   add_executable(winfencef90 winfencef90.f90)
-# add_executable(wingroupf90 wingroupf90.f90)
+  add_executable(wingroupf90 wingroupf90.f90)
 # add_executable(baseattrwinf90 baseattrwinf90.f90)
 # add_executable(winattr2f90 winattr2f90.f90)
 # add_executable(winattrf90 winattrf90.f90)
@@ -25,7 +25,7 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite AND SMPI_FORTRAN)
   target_link_libraries(winaccf90 simgrid mtest_f90)
 #target_link_libraries(winerrf90 simgrid mtest_f90)
   target_link_libraries(winfencef90 simgrid mtest_f90)
-#target_link_libraries(wingroupf90 simgrid mtest_f90)
+target_link_libraries(wingroupf90 simgrid mtest_f90)
 #target_link_libraries(baseattrwinf90 simgrid mtest_f90)
   target_link_libraries(c2f2cwinf90 simgrid mtest_f90)
 #target_link_libraries(winattr2f90 simgrid mtest_f90)
diff --git a/teshsuite/smpi/mpich3-test/f90/rma/testlist b/teshsuite/smpi/mpich3-test/f90/rma/testlist
index 1dddb0c8ac..0686c7359a 100644
--- a/teshsuite/smpi/mpich3-test/f90/rma/testlist
+++ b/teshsuite/smpi/mpich3-test/f90/rma/testlist
@@ -6,7 +6,7 @@ wingetf90 5 needs_privatization=1
 #winscale2f90 4
 #winerrf90 1
 winnamef90 1
-#wingroupf90 4
+wingroupf90 4 needs_privatization=1
 winaccf90 4 needs_privatization=1
 c2f2cwinf90 1
 #baseattrwinf90 1
diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake
index ca88375580..41f0565d9e 100644
--- a/tools/cmake/DefinePackages.cmake
+++ b/tools/cmake/DefinePackages.cmake
@@ -33,8 +33,8 @@ set(EXTRA_DIST
   src/smpi/colls/coll_tuned_topo.h
   src/smpi/colls/colls_private.h
   src/smpi/colls/smpi_mvapich2_selector_stampede.h
-  src/smpi/private.h
-  src/smpi/private.hpp
+  src/smpi/include/private.h
+  src/smpi/include/private.hpp
   src/surf/cpu_cas01.hpp
   src/surf/cpu_interface.hpp
   src/surf/cpu_ti.hpp
@@ -96,6 +96,23 @@ set(EXTRA_DIST
   )
 
 set(SMPI_SRC
+  src/smpi/smpi_main.c
+  src/smpi/bindings/smpi_mpi.cpp
+  src/smpi/bindings/smpi_pmpi.cpp
+  src/smpi/bindings/smpi_pmpi_coll.cpp
+  src/smpi/bindings/smpi_pmpi_comm.cpp
+  src/smpi/bindings/smpi_pmpi_group.cpp
+  src/smpi/bindings/smpi_pmpi_info.cpp
+  src/smpi/bindings/smpi_pmpi_op.cpp
+  src/smpi/bindings/smpi_pmpi_request.cpp
+  src/smpi/bindings/smpi_pmpi_topo.cpp
+  src/smpi/bindings/smpi_pmpi_type.cpp
+  src/smpi/bindings/smpi_pmpi_win.cpp
+  src/smpi/bindings/smpi_f77.cpp
+  src/smpi/bindings/smpi_f77_coll.cpp
+  src/smpi/bindings/smpi_f77_comm.cpp
+  src/smpi/bindings/smpi_f77_request.cpp
+  src/smpi/bindings/smpi_f77_type.cpp
   src/smpi/colls/allgather/allgather-2dmesh.cpp
   src/smpi/colls/allgather/allgather-3dmesh.cpp
   src/smpi/colls/allgather/allgather-GB.cpp
@@ -200,51 +217,46 @@ set(SMPI_SRC
   src/smpi/colls/smpi_intel_mpi_selector.cpp
   src/smpi/colls/smpi_openmpi_selector.cpp
   src/smpi/colls/smpi_mvapich2_selector.cpp
-  src/smpi/instr_smpi.cpp
-  src/smpi/smpi_bench.cpp
-  src/smpi/smpi_memory.cpp
-  src/smpi/smpi_shared.cpp
-  src/smpi/smpi_static_variables.cpp
-  src/smpi/smpi_coll.cpp
-  src/smpi/smpi_coll.hpp
-  src/smpi/smpi_comm.cpp
-  src/smpi/smpi_comm.hpp
-  src/smpi/smpi_deployment.cpp
-  src/smpi/smpi_dvfs.cpp
-  src/smpi/smpi_global.cpp
-  src/smpi/smpi_f2c.cpp
-  src/smpi/smpi_f2c.hpp
-  src/smpi/smpi_group.cpp
-  src/smpi/smpi_group.hpp
-  src/smpi/SmpiHost.cpp
-  src/smpi/SmpiHost.hpp
-  src/smpi/smpi_mpi.cpp
-  src/smpi/smpi_datatype.cpp
-  src/smpi/smpi_datatype.hpp
-  src/smpi/smpi_info.cpp
-  src/smpi/smpi_info.hpp
-  src/smpi/smpi_keyvals.cpp
-  src/smpi/smpi_keyvals.hpp
-  src/smpi/smpi_datatype_derived.cpp
-  src/smpi/smpi_datatype_derived.hpp
-  src/smpi/smpi_main.c
-  src/smpi/smpi_op.cpp
-  src/smpi/smpi_op.hpp
-  src/smpi/smpi_process.cpp
-  src/smpi/smpi_process.hpp
-  src/smpi/smpi_pmpi.cpp
-  src/smpi/smpi_replay.cpp
-  src/smpi/smpi_request.cpp
-  src/smpi/smpi_request.hpp
-  src/smpi/smpi_status.cpp
-  src/smpi/smpi_status.hpp
-  src/smpi/smpi_win.cpp
-  src/smpi/smpi_win.hpp
-  src/smpi/smpi_topo.cpp
-  src/smpi/smpi_topo.hpp
-  src/smpi/smpi_utils.cpp
-  src/smpi/smpi_f77.cpp
-
+  src/smpi/colls/smpi_coll.cpp
+  src/smpi/internals/instr_smpi.cpp
+  src/smpi/internals/smpi_bench.cpp
+  src/smpi/internals/smpi_memory.cpp
+  src/smpi/internals/smpi_shared.cpp
+  src/smpi/internals/smpi_static_variables.cpp
+  src/smpi/internals/smpi_deployment.cpp
+  src/smpi/internals/smpi_dvfs.cpp
+  src/smpi/internals/smpi_global.cpp
+  src/smpi/internals/SmpiHost.cpp
+  src/smpi/internals/smpi_replay.cpp
+  src/smpi/internals/smpi_process.cpp
+  src/smpi/internals/smpi_utils.cpp
+  src/smpi/mpi/smpi_comm.cpp
+  src/smpi/mpi/smpi_datatype.cpp
+  src/smpi/mpi/smpi_datatype_derived.cpp
+  src/smpi/mpi/smpi_f2c.cpp
+  src/smpi/mpi/smpi_group.cpp
+  src/smpi/mpi/smpi_info.cpp
+  src/smpi/mpi/smpi_keyvals.cpp
+  src/smpi/mpi/smpi_op.cpp
+  src/smpi/mpi/smpi_request.cpp
+  src/smpi/mpi/smpi_status.cpp
+  src/smpi/mpi/smpi_topo.cpp
+  src/smpi/mpi/smpi_win.cpp
+  src/smpi/include/smpi_coll.hpp
+  src/smpi/include/smpi_comm.hpp
+  src/smpi/include/smpi_f2c.hpp
+  src/smpi/include/smpi_group.hpp
+  src/smpi/include/SmpiHost.hpp
+  src/smpi/include/smpi_datatype.hpp
+  src/smpi/include/smpi_info.hpp
+  src/smpi/include/smpi_keyvals.hpp
+  src/smpi/include/smpi_datatype_derived.hpp
+  src/smpi/include/smpi_op.hpp
+  src/smpi/include/smpi_process.hpp
+  src/smpi/include/smpi_request.hpp
+  src/smpi/include/smpi_status.hpp
+  src/smpi/include/smpi_win.hpp
+  src/smpi/include/smpi_topo.hpp
   src/surf/network_smpi.cpp
   src/surf/network_ib.cpp
 )
diff --git a/tools/simgrid.supp b/tools/simgrid.supp
index 49ed161781..b9d6b77be1 100644
--- a/tools/simgrid.supp
+++ b/tools/simgrid.supp
@@ -80,8 +80,6 @@
    fun:malloc
    ...
    fun:dlopen@@GLIBC_*
-   ...
-   fun:main
 }
 
 {
@@ -91,8 +89,6 @@
    fun:calloc
    ...
    fun:dlopen@@GLIBC_*
-   ...
-   fun:main
 }
 
 # Memory leaks appearing to be in libcgraph. They can be seen with the