Some C calls have to stay in smpi_global.cpp to not break compatibility, as they are public. So they are now wrapped.
#if HAVE_SMPI
if (SIMIX_process_count()>0){
- if(smpi_process_initialized()){
+ if(smpi_process()->initialized()){
xbt_die("Process exited without calling MPI_Finalize - Killing simulation");
}else{
XBT_WARN("Process called exit when leaving - Skipping cleanups");
type=PJ_type_event_new(#cat, PJ_type_get_root());\
}\
char cont_name[25];\
- snprintf(cont_name,25, "rank-%d", smpi_process_index());\
+ snprintf(cont_name,25, "rank-%d", smpi_process()->index());\
val_t value = PJ_value_get_or_new(Colls::mpi_coll_##cat##_description[i].name,"1.0 1.0 1.0", type);\
new_pajeNewEvent (SIMIX_get_clock(), PJ_container_get(cont_name), type, value);\
}
int index = 0;
for (int src = 0; src < size; src++) {
if (src != root) {
- if (!smpi_process_get_replaying())
+ if (!smpi_process()->replaying())
tmpbufs[index] = xbt_malloc(count * dataext);
else
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
#endif
PJ_container_new(str, INSTR_SMPI, father);
#if HAVE_PAPI
- papi_counter_t counters = smpi_process_papi_counters();
+ papi_counter_t counters = smpi_process()->papi_counters();
for (auto& it : counters) {
/**
#include "xbt/base.h"
#include "xbt/synchro.h"
#include "xbt/xbt_os_time.h"
+#include "src/smpi/smpi_process.hpp"
#include "src/smpi/smpi_f2c.hpp"
#include "src/smpi/smpi_keyvals.hpp"
#include "src/smpi/smpi_group.hpp"
using namespace simgrid::smpi;
-struct s_smpi_process_data;
-typedef struct s_smpi_process_data *smpi_process_data_t;
#define PERSISTENT 0x1
#define NON_PERSISTENT 0x2
extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED;
-XBT_PRIVATE void smpi_process_destroy();
-XBT_PRIVATE void smpi_process_finalize();
-XBT_PRIVATE int smpi_process_finalized();
-XBT_PRIVATE int smpi_process_initialized();
-XBT_PRIVATE void smpi_process_mark_as_initialized();
-
typedef SMPI_Cart_topology *MPIR_Cart_Topology;
typedef SMPI_Graph_topology *MPIR_Graph_Topology;
typedef SMPI_Dist_Graph_topology *MPIR_Dist_Graph_Topology;
-XBT_PRIVATE smpi_process_data_t smpi_process_data();
-XBT_PRIVATE smpi_process_data_t smpi_process_remote_data(int index);
-// smpi_process_[set/get]_user_data must be public
-/* XBT_PRIVATE void smpi_process_set_user_data(void *); */
-/* XBT_PRIVATE void* smpi_process_get_user_data(void); */
+XBT_PRIVATE Process* smpi_process();
+XBT_PRIVATE Process* smpi_process_remote(int index);
XBT_PRIVATE int smpi_process_count();
-XBT_PRIVATE MPI_Comm smpi_process_comm_world();
-XBT_PRIVATE MPI_Comm smpi_process_get_comm_intra();
-XBT_PRIVATE void smpi_process_set_comm_intra(MPI_Comm comm);
-XBT_PRIVATE smx_mailbox_t smpi_process_mailbox();
-XBT_PRIVATE smx_mailbox_t smpi_process_remote_mailbox(int index);
-XBT_PRIVATE smx_mailbox_t smpi_process_mailbox_small();
-XBT_PRIVATE smx_mailbox_t smpi_process_remote_mailbox_small(int index);
-XBT_PRIVATE xbt_mutex_t smpi_process_mailboxes_mutex();
-XBT_PRIVATE xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index);
-XBT_PRIVATE xbt_os_timer_t smpi_process_timer();
-XBT_PRIVATE void smpi_process_simulated_start();
-XBT_PRIVATE double smpi_process_simulated_elapsed();
-XBT_PRIVATE void smpi_process_set_sampling(int s);
-XBT_PRIVATE int smpi_process_get_sampling();
-XBT_PRIVATE void smpi_process_set_replaying(bool s);
-XBT_PRIVATE bool smpi_process_get_replaying();
-
-XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index, MPI_Comm** comm,
- msg_bar_t* bar);
+
+XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index);
+XBT_PRIVATE MPI_Comm* smpi_deployment_comm_world(const char* instance_id);
+XBT_PRIVATE msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id);
XBT_PRIVATE void smpi_deployment_cleanup_instances();
XBT_PRIVATE void smpi_comm_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size);
double smpi_mpi_wtime(){
double time;
- if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) {
+ if (smpi_process()->initialized() != 0 && smpi_process()->finalized() == 0 && smpi_process()->sampling() == 0) {
smpi_bench_end();
time = SIMIX_get_clock();
// to avoid deadlocks if used as a break condition, such as
smx_activity_t action = simcall_execution_start("computation", flops, 1, 0);
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
void smpi_execute(double duration)
if (duration >= smpi_cpu_threshold) {
XBT_DEBUG("Sleep for %g to handle real computation time", duration);
double flops = duration * smpi_host_speed;
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type=TRACING_COMPUTING;
extra->comp_size=flops;
void smpi_bench_begin()
{
if (smpi_privatize_global_variables) {
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
if (MC_is_active() || MC_record_replay_is_active())
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
- int event_set = smpi_process_papi_event_set();
+ int event_set = smpi_process()->papi_event_set();
// PAPI_start sets everything to 0! See man(3) PAPI_start
if (PAPI_LOW_LEVEL_INITED == PAPI_is_initialized()) {
if (PAPI_start(event_set) != PAPI_OK) {
}
}
#endif
- xbt_os_threadtimer_start(smpi_process_timer());
+ xbt_os_threadtimer_start(smpi_process()->timer());
}
void smpi_bench_end()
return;
double speedup = 1;
- xbt_os_timer_t timer = smpi_process_timer();
+ xbt_os_timer_t timer = smpi_process()->timer();
xbt_os_threadtimer_stop(timer);
#if HAVE_PAPI
* our PAPI counters for this process.
*/
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
- papi_counter_t& counter_data = smpi_process_papi_counters();
- int event_set = smpi_process_papi_event_set();
+ papi_counter_t& counter_data = smpi_process()->papi_counters();
+ int event_set = smpi_process()->papi_event_set();
std::vector<long long> event_values = std::vector<long long>(counter_data.size());
if (PAPI_stop(event_set, &event_values[0]) != PAPI_OK) { // Error
} else {
for (unsigned int i = 0; i < counter_data.size(); i++) {
counter_data[i].second += event_values[i];
- // XBT_DEBUG("[%i] PAPI: Counter %s: Value is now %lli (got increment by %lli\n", smpi_process_index(),
+ // XBT_DEBUG("[%i] PAPI: Counter %s: Value is now %lli (got increment by %lli\n", smpi_process()->index(),
// counter_data[i].first.c_str(), counter_data[i].second, event_values[i]);
}
}
}
#endif
- if (smpi_process_get_sampling()) {
+ if (smpi_process()->sampling()) {
XBT_CRITICAL("Cannot do recursive benchmarks.");
XBT_CRITICAL("Are you trying to make a call to MPI within a SMPI_SAMPLE_ block?");
xbt_backtrace_display_current();
if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') { // Maybe we need to artificially speed up or slow
// down our computation based on our statistical analysis.
- smpi_trace_call_location_t* loc = smpi_process_get_call_location();
+ smpi_trace_call_location_t* loc = smpi_process()->call_location();
std::string key = loc->get_composed_key();
std::unordered_map<std::string, double>::const_iterator it = location2speedup.find(key);
if (it != location2speedup.end()) {
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
char container_name[INSTR_DEFAULT_STR_SIZE];
- smpi_container(smpi_process_index(), container_name, INSTR_DEFAULT_STR_SIZE);
+ smpi_container(smpi_process()->index(), container_name, INSTR_DEFAULT_STR_SIZE);
container_t container = PJ_container_get(container_name);
- papi_counter_t& counter_data = smpi_process_papi_counters();
+ papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto& pair : counter_data) {
new_pajeSetVariable(surf_get_clock(), container,
if (global) {
return bprintf("%s:%d", file, line);
} else {
- return bprintf("%s:%d:%d", file, line, smpi_process_index());
+ return bprintf("%s:%d:%d", file, line, smpi_process()->index());
}
}
char *loc = sample_location(global, file, line);
smpi_bench_end(); /* Take time from previous, unrelated computation into account */
- smpi_process_set_sampling(1);
+ smpi_process()->set_sampling(1);
if (samples==nullptr)
samples = xbt_dict_new_homogeneous(free);
" apply the %fs delay instead",
data->count, data->iters, data->relstderr, data->threshold, data->mean);
smpi_execute(data->mean);
- smpi_process_set_sampling(0);
+ smpi_process()->set_sampling(0);
res = 0; // prepare to capture future, unrelated computations
}
smpi_bench_begin();
THROW_IMPOSSIBLE;
// ok, benchmarking this loop is over
- xbt_os_threadtimer_stop(smpi_process_timer());
+ xbt_os_threadtimer_stop(smpi_process()->timer());
// update the stats
data->count++;
- double sample = xbt_os_timer_elapsed(smpi_process_timer());
+ double sample = xbt_os_timer_elapsed(smpi_process()->timer());
data->sum += sample;
data->sum_pow2 += sample * sample;
double n = static_cast<double>(data->count);
extern "C" { /** These functions will be called from the user code **/
smpi_trace_call_location_t* smpi_trace_get_call_location() {
- return smpi_process_get_call_location();
+ return smpi_process()->call_location();
}
void smpi_trace_set_call_location(const char* file, const int line) {
- smpi_trace_call_location_t* loc = smpi_process_get_call_location();
+ smpi_trace_call_location_t* loc = smpi_process()->call_location();
loc->previous_filename = loc->filename;
loc->previous_linenumber = loc->linenumber;
void Comm::destroy(Comm* comm)
{
if (comm == MPI_COMM_UNINITIALIZED){
- Comm::destroy(smpi_process_comm_world());
+ Comm::destroy(smpi_process()->comm_world());
return;
}
delete comm->topo_; // there's no use count on topos
int Comm::dup(MPI_Comm* newcomm){
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
MPI_Group Comm::group()
{
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->group();
+ return smpi_process()->comm_world()->group();
return group_;
}
int Comm::size()
{
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->size();
+ return smpi_process()->comm_world()->size();
return group_->size();
}
int Comm::rank()
{
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->rank();
- return group_->rank(smpi_process_index());
+ return smpi_process()->comm_world()->rank();
+ return group_->rank(smpi_process()->index());
}
void Comm::get_name (char* name, int* len)
{
if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->get_name(name, len);
+ smpi_process()->comm_world()->get_name(name, len);
return;
}
if(this == MPI_COMM_WORLD) {
void Comm::set_leaders_comm(MPI_Comm leaders){
if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->set_leaders_comm(leaders);
+ smpi_process()->comm_world()->set_leaders_comm(leaders);
return;
}
leaders_comm_=leaders;
int* Comm::get_non_uniform_map(){
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->get_non_uniform_map();
+ return smpi_process()->comm_world()->get_non_uniform_map();
return non_uniform_map_;
}
int* Comm::get_leaders_map(){
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->get_leaders_map();
+ return smpi_process()->comm_world()->get_leaders_map();
return leaders_map_;
}
MPI_Comm Comm::get_leaders_comm(){
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->get_leaders_comm();
+ return smpi_process()->comm_world()->get_leaders_comm();
return leaders_comm_;
}
MPI_Comm Comm::get_intra_comm(){
if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
- return smpi_process_get_comm_intra();
+ return smpi_process()->comm_intra();
else return intra_comm_;
}
int Comm::is_uniform(){
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->is_uniform();
+ return smpi_process()->comm_world()->is_uniform();
return is_uniform_;
}
int Comm::is_blocked(){
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->is_blocked();
+ return smpi_process()->comm_world()->is_blocked();
return is_blocked_;
}
MPI_Comm Comm::split(int color, int key)
{
if (this == MPI_COMM_UNINITIALIZED)
- return smpi_process_comm_world()->split(color, key);
+ return smpi_process()->comm_world()->split(color, key);
int system_tag = 123;
int* recvbuf;
void Comm::ref(){
if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->ref();
+ smpi_process()->comm_world()->ref();
return;
}
group_->ref();
void Comm::unref(Comm* comm){
if (comm == MPI_COMM_UNINITIALIZED){
- Comm::unref(smpi_process_comm_world());
+ Comm::unref(smpi_process()->comm_world());
return;
}
comm->refcount_--;
int leader = -1;
if (this == MPI_COMM_UNINITIALIZED)
- smpi_process_comm_world()->init_smp();
+ smpi_process()->comm_world()->init_smp();
int comm_size = this->size();
// If we are in replay - perform an ugly hack
// tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
bool replaying = false; //cache data to set it back again after
- if(smpi_process_get_replaying()){
+ if(smpi_process()->replaying()){
replaying=true;
- smpi_process_set_replaying(false);
+ smpi_process()->set_replaying(false);
}
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
//identify neighbours in comm
//get the indexes of all processes sharing the same simix host
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
if(leaders_map_==nullptr){
leader_comm=this->get_leaders_comm();
Group::unref(leaders_group);
}
- smpi_process_set_comm_intra(comm_intra);
+ smpi_process()->set_comm_intra(comm_intra);
}
int is_uniform = 1;
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
xbt_free(leader_list);
if(replaying)
- smpi_process_set_replaying(true);
+ smpi_process()->set_replaying(true);
}
MPI_Comm Comm::f2c(int id) {
void *recvbuf, int recvcount, MPI_Datatype recvtype){
int count;
if(smpi_privatize_global_variables){
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
/* First check if we really have something to do */
if (recvcount > 0 && recvbuf != sendbuf) {
count = sendcount < recvcount ? sendcount : recvcount;
if(!(sendtype->flags() & DT_FLAG_DERIVED) && !(recvtype->flags() & DT_FLAG_DERIVED)) {
- if(!smpi_process_get_replaying())
+ if(!smpi_process()->replaying())
memcpy(recvbuf, sendbuf, count);
}
else if (!(sendtype->flags() & DT_FLAG_DERIVED))
}
//get the index of the process in the process_data array
-void smpi_deployment_register_process(const char* instance_id, int rank, int index, MPI_Comm** comm, msg_bar_t* bar)
+void smpi_deployment_register_process(const char* instance_id, int rank, int index)
{
if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
index_to_process_data[index]=index;
- *bar = nullptr;
- *comm = nullptr;
return;
}
instance->present_processes++;
index_to_process_data[index]=instance->index+rank;
instance->comm_world->group()->set_mapping(index, rank);
- *bar = instance->finalization_barrier;
- *comm = &instance->comm_world;
+}
+
+//get the MPI_COMM_WORLD of a given instance (nullptr if no instance was registered, e.g. when using smpirun)
+MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
+{
+ if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
+ return nullptr;
+ }
+ s_smpi_mpi_instance_t* instance =
+ static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
+ xbt_assert(instance, "Error, unknown instance %s", instance_id);
+ return &instance->comm_world;
+}
+
+msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id)
+{
+ if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
+ return nullptr;
+ }
+ s_smpi_mpi_instance_t* instance =
+ static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
+ xbt_assert(instance, "Error, unknown instance %s", instance_id);
+ return instance->finalization_barrier;
}
void smpi_deployment_cleanup_instances(){
}
char* F2C::get_key_id(char* key, int id) {
- snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process_index());
+ snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process()->index());
return key;
}
}
static char* get_key_id(char* key, int id) {
- snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process_index());
+ snprintf(key, KEY_SIZE, "%x_%d",id, smpi_process()->index());
return key;
}
#endif
std::unordered_map<std::string, double> location2speedup;
-typedef struct s_smpi_process_data {
- double simulated;
- int *argc;
- char ***argv;
- simgrid::s4u::MailboxPtr mailbox;
- simgrid::s4u::MailboxPtr mailbox_small;
- xbt_mutex_t mailboxes_mutex;
- xbt_os_timer_t timer;
- MPI_Comm comm_self;
- MPI_Comm comm_intra;
- MPI_Comm* comm_world;
- void *data; /* user data */
- int index;
- char state;
- int sampling; /* inside an SMPI_SAMPLE_ block? */
- char* instance_id;
- bool replaying; /* is the process replaying a trace */
- msg_bar_t finalization_barrier;
- int return_value;
- smpi_trace_call_location_t trace_call_loc;
-#if HAVE_PAPI
- /** Contains hardware data as read by PAPI **/
- int papi_event_set;
- papi_counter_t papi_counter_data;
-#endif
-} s_smpi_process_data_t;
-
-static smpi_process_data_t *process_data = nullptr;
+Process **process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
-#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
-
-static char *get_mailbox_name(char *str, int index)
-{
- snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int> (sizeof(int) * 2), index);
- return str;
-}
-static char *get_mailbox_name_small(char *str, int index)
-{
- snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int> (sizeof(int) * 2), index);
- return str;
-}
-
-void smpi_process_init(int *argc, char ***argv)
-{
-
- if (process_data == nullptr){
- printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
- exit(1);
- }
- if (argc != nullptr && argv != nullptr) {
- smx_actor_t proc = SIMIX_process_self();
- proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
- char* instance_id = (*argv)[1];
- int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
- int index = smpi_process_index_of_smx_process(proc);
-
- if(index_to_process_data == nullptr){
- index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
- }
-
- if(smpi_privatize_global_variables){
- /* Now using segment index of the process */
- index = proc->segment_index;
- /* Done at the process's creation */
- SMPI_switch_data_segment(index);
- }
- MPI_Comm* temp_comm_world;
- msg_bar_t temp_bar;
- smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
- smpi_process_data_t data = smpi_process_remote_data(index);
- data->comm_world = temp_comm_world;
- if(temp_bar != nullptr)
- data->finalization_barrier = temp_bar;
- data->index = index;
- data->instance_id = instance_id;
- data->replaying = false;
-
- static_cast<simgrid::MsgActorExt*>(proc->data)->data = data;
-
- if (*argc > 3) {
- memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
- (*argv)[(*argc) - 1] = nullptr;
- (*argv)[(*argc) - 2] = nullptr;
- }
- (*argc)-=2;
- data->argc = argc;
- data->argv = argv;
- // set the process attached to the mailbox
- data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
- XBT_DEBUG("<%d> New process in the game: %p", index, proc);
- }
- xbt_assert(smpi_process_data(),
- "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
- "Although it's required by MPI-2, this is currently not supported by SMPI.");
-}
-
-void smpi_process_destroy()
+int smpi_process_count()
{
- int index = smpi_process_index();
- if(smpi_privatize_global_variables){
- smpi_switch_data_segment(index);
- }
- process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
- XBT_DEBUG("<%d> Process left the game", index);
+ return process_count;
}
-/** @brief Prepares the current process for termination. */
-void smpi_process_finalize()
+Process* smpi_process()
{
- // This leads to an explosion of the search graph which cannot be reduced:
- if(MC_is_active() || MC_record_replay_is_active())
- return;
-
- int index = smpi_process_index();
- // wait for all pending asynchronous comms to finish
- MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
+ simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
+ return static_cast<Process*>(msgExt->data);
}
-/** @brief Check if a process is finalized */
-int smpi_process_finalized()
+Process* smpi_process_remote(int index)
{
- int index = smpi_process_index();
- if (index != MPI_UNDEFINED)
- return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);
- else
- return 0;
+ return process_data[index_to_process_data[index]];
}
-/** @brief Check if a process is initialized */
-int smpi_process_initialized()
-{
- if (index_to_process_data == nullptr){
- return false;
- } else{
- int index = smpi_process_index();
- return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));
- }
+MPI_Comm smpi_process_comm_self(){
+ return smpi_process()->comm_self();
}
-/** @brief Mark a process as initialized (=MPI_Init called) */
-void smpi_process_mark_as_initialized()
-{
- int index = smpi_process_index();
- if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
- process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;
+void smpi_process_init(int *argc, char ***argv){
+ Process::init(argc, argv);
}
-void smpi_process_set_replaying(bool value){
- int index = smpi_process_index();
- if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
- process_data[index_to_process_data[index]]->replaying = value;
+int smpi_process_index(){
+ return smpi_process()->index();
}
-bool smpi_process_get_replaying(){
- int index = smpi_process_index();
- if (index != MPI_UNDEFINED)
- return process_data[index_to_process_data[index]]->replaying;
- else
- return false;
-}
int smpi_global_size()
{
return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s");
}
-smpi_process_data_t smpi_process_data()
-{
- simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
- return static_cast<smpi_process_data_t>(msgExt->data);
-}
-
-smpi_process_data_t smpi_process_remote_data(int index)
-{
- return process_data[index_to_process_data[index]];
-}
-
-void smpi_process_set_user_data(void *data)
-{
- smpi_process_data_t process_data = smpi_process_data();
- process_data->data = data;
-}
-
-void *smpi_process_get_user_data()
-{
- smpi_process_data_t process_data = smpi_process_data();
- return process_data->data;
-}
-
-int smpi_process_count()
-{
- return process_count;
-}
-
-/**
- * \brief Returns a structure that stores the location (filename + linenumber)
- * of the last calls to MPI_* functions.
- *
- * \see smpi_trace_set_call_location
- */
-smpi_trace_call_location_t* smpi_process_get_call_location()
-{
- smpi_process_data_t process_data = smpi_process_data();
- return &process_data->trace_call_loc;
-}
-
-int smpi_process_index()
-{
- smpi_process_data_t data = smpi_process_data();
- //return -1 if not initialized
- return data != nullptr ? data->index : MPI_UNDEFINED;
-}
-
-MPI_Comm smpi_process_comm_world()
-{
- smpi_process_data_t data = smpi_process_data();
- //return MPI_COMM_NULL if not initialized
- return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
-}
-
-smx_mailbox_t smpi_process_mailbox()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->mailbox->getImpl();
-}
-
-smx_mailbox_t smpi_process_mailbox_small()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->mailbox_small->getImpl();
-}
-
-xbt_mutex_t smpi_process_mailboxes_mutex()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->mailboxes_mutex;
-}
-
-smx_mailbox_t smpi_process_remote_mailbox(int index)
-{
- smpi_process_data_t data = smpi_process_remote_data(index);
- return data->mailbox->getImpl();
-}
-
-smx_mailbox_t smpi_process_remote_mailbox_small(int index)
-{
- smpi_process_data_t data = smpi_process_remote_data(index);
- return data->mailbox_small->getImpl();
-}
-
-xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
-{
- smpi_process_data_t data = smpi_process_remote_data(index);
- return data->mailboxes_mutex;
-}
-
-#if HAVE_PAPI
-int smpi_process_papi_event_set(void)
-{
- smpi_process_data_t data = smpi_process_data();
- return data->papi_event_set;
-}
-
-papi_counter_t& smpi_process_papi_counters(void)
-{
- smpi_process_data_t data = smpi_process_data();
- return data->papi_counter_data;
-}
-#endif
-
-xbt_os_timer_t smpi_process_timer()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->timer;
-}
-
-void smpi_process_simulated_start()
-{
- smpi_process_data_t data = smpi_process_data();
- data->simulated = SIMIX_get_clock();
-}
-
-double smpi_process_simulated_elapsed()
-{
- smpi_process_data_t data = smpi_process_data();
- return SIMIX_get_clock() - data->simulated;
-}
-
-MPI_Comm smpi_process_comm_self()
-{
- smpi_process_data_t data = smpi_process_data();
- if(data->comm_self==MPI_COMM_NULL){
- MPI_Group group = new Group(1);
- data->comm_self = new Comm(group, nullptr);
- group->set_mapping(smpi_process_index(), 0);
- }
-
- return data->comm_self;
-}
-
-MPI_Comm smpi_process_get_comm_intra()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->comm_intra;
-}
-
-void smpi_process_set_comm_intra(MPI_Comm comm)
-{
- smpi_process_data_t data = smpi_process_data();
- data->comm_intra = comm;
-}
-
-void smpi_process_set_sampling(int s)
-{
- smpi_process_data_t data = smpi_process_data();
- data->sampling = s;
-}
-
-int smpi_process_get_sampling()
-{
- smpi_process_data_t data = smpi_process_data();
- return data->sampling;
-}
-
void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
{
smpi_comm_copy_data_callback = callback;
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
smpi_switch_data_segment(
- (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index));
+ (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
memcpy(tmpbuff, buff, buff_size);
}
&& ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
smpi_switch_data_segment(
- (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index));
+ (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
}
memcpy(comm->dst_buff, tmpbuff, buff_size);
{
int i;
MPI_Group group;
- char name[MAILBOX_NAME_MAXLEN];
int smpirun=0;
if (!MC_is_active()) {
smpirun=1;
}
smpi_universe_size = process_count;
- process_data = new smpi_process_data_t[process_count];
+ process_data = new Process*[process_count];
for (i = 0; i < process_count; i++) {
- process_data[i] = new s_smpi_process_data_t;
- process_data[i]->argc = nullptr;
- process_data[i]->argv = nullptr;
- process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
- process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
- process_data[i]->mailboxes_mutex = xbt_mutex_init();
- process_data[i]->timer = xbt_os_timer_new();
- if (MC_is_active())
- MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
- process_data[i]->comm_self = MPI_COMM_NULL;
- process_data[i]->comm_intra = MPI_COMM_NULL;
- process_data[i]->comm_world = nullptr;
- process_data[i]->state = SMPI_UNINITIALIZED;
- process_data[i]->sampling = 0;
- process_data[i]->finalization_barrier = nullptr;
- process_data[i]->return_value = 0;
-
-#if HAVE_PAPI
- if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
- // TODO: Implement host/process/thread based counters. This implementation
- // just always takes the values passed via "default", like this:
- // "default:COUNTER1:COUNTER2:COUNTER3;".
- auto it = units2papi_setup.find(papi_default_config_name);
- if (it != units2papi_setup.end()) {
- process_data[i]->papi_event_set = it->second.event_set;
- process_data[i]->papi_counter_data = it->second.counter_data;
- XBT_DEBUG("Setting PAPI set for process %i", i);
- } else {
- process_data[i]->papi_event_set = PAPI_NULL;
- XBT_DEBUG("No PAPI set for process %i", i);
- }
- }
-#endif
+ process_data[i] = new Process(i);
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
for (i = 0; i < process_count; i++) {
group->set_mapping(i, i);
- process_data[i]->finalization_barrier = bar;
+ process_data[i]->set_finalization_barrier(bar);
}
}
}
smpi_bench_destroy();
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
delete MPI_COMM_WORLD->group();
- MSG_barrier_destroy(process_data[0]->finalization_barrier);
+ MSG_barrier_destroy(process_data[0]->finalization_barrier());
}else{
smpi_deployment_cleanup_instances();
}
for (int i = 0; i < count; i++) {
- if(process_data[i]->comm_self!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_self);
+ if(process_data[i]->comm_self()!=MPI_COMM_NULL){
+ Comm::destroy(process_data[i]->comm_self());
}
- if(process_data[i]->comm_intra!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_intra);
+ if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
+ Comm::destroy(process_data[i]->comm_intra());
}
- xbt_os_timer_free(process_data[i]->timer);
- xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
+ xbt_os_timer_free(process_data[i]->timer());
+ xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
delete process_data[i];
}
delete[] process_data;
int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
- smpi_process_init(&argc, &argv);
+ Process::init(&argc, &argv);
user_main_();
return 0;
}
int ret = smpi_simulated_main_(argc,argv);
if(ret !=0){
XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
- smpi_process_data()->return_value=ret;
+ smpi_process()->set_return_value(ret);
}
return 0;
}
int count = smpi_process_count();
int i, ret=0;
for (i = 0; i < count; i++) {
- if(process_data[i]->return_value!=0){
- ret=process_data[i]->return_value;//return first non 0 value
+ if(process_data[i]->return_value()!=0){
+ ret=process_data[i]->return_value();//return first non 0 value
break;
}
}
{
if(smpi_privatize_global_variables){//we need to switch as the called function may silently touch global variables
XBT_DEBUG("Applying operation, switch to the right data frame ");
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
- if(!smpi_process_get_replaying()){
+ if(!smpi_process()->replaying()){
if(! is_fortran_op_)
this->func_(invec, inoutvec, len, &datatype);
else{
int already_init;
MPI_Initialized(&already_init);
if(already_init == 0){
- smpi_process_init(argc, argv);
- smpi_process_mark_as_initialized();
- int rank = smpi_process_index();
+ Process::init(argc, argv);
+ smpi_process()->mark_as_initialized();
+ int rank = smpi_process()->index();
TRACE_smpi_init(rank);
TRACE_smpi_computing_init(rank);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
int PMPI_Finalize()
{
smpi_bench_end();
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_FINALIZE;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- smpi_process_finalize();
+ smpi_process()->finalize();
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- TRACE_smpi_finalize(smpi_process_index());
- smpi_process_destroy();
+ TRACE_smpi_finalize(smpi_process()->index());
+ smpi_process()->destroy();
return MPI_SUCCESS;
}
int PMPI_Finalized(int* flag)
{
- *flag=smpi_process_finalized();
+ *flag=smpi_process()!=nullptr ? smpi_process()->finalized() : 0;
return MPI_SUCCESS;
}
if (flag == nullptr) {
return MPI_ERR_ARG;
} else {
- *flag = smpi_process_index() == 0;
+ *flag = smpi_process()->index() == 0;
return MPI_SUCCESS;
}
}
int PMPI_Abort(MPI_Comm comm, int errorcode)
{
smpi_bench_end();
- smpi_process_destroy();
+ smpi_process()->destroy();
// FIXME: should kill all processes in comm instead
simcall_process_kill(SIMIX_process_self());
return MPI_SUCCESS;
} else if (rank == nullptr) {
return MPI_ERR_ARG;
} else {
- *rank = group->rank(smpi_process_index());
+ *rank = group->rank(smpi_process()->index());
return MPI_SUCCESS;
}
}
return MPI_ERR_GROUP;
} else if (newcomm == nullptr) {
return MPI_ERR_ARG;
- } else if(group->rank(smpi_process_index())==MPI_UNDEFINED){
+ } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
*newcomm= MPI_COMM_NULL;
return MPI_SUCCESS;
}else{
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int src_traced = comm->group()->index(src);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int dst_traced = comm->group()->index(dst);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ISEND;
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int dst_traced = comm->group()->index(dst);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ISSEND;
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int src_traced = comm->group()->index(src);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_RECV;
} else if(tag < 0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int dst_traced = comm->group()->index(dst);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_SEND;
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int dst_traced = comm->group()->index(dst);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_SSEND;
retval = MPI_ERR_TAG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int dst_traced = comm->group()->index(dst);
int src_traced = comm->group()->index(src);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
smpi_empty_status(status);
retval = MPI_SUCCESS;
} else {
- int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process_index() : -1;
+ int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_TEST;
retval = MPI_SUCCESS;
} else {
- int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process_index() : -1;
+ int rank = (request!=nullptr && (*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
int src_traced = (*request)->src();
int dst_traced = (*request)->dst();
savedvals[i]=(savedvalstype){req->src(), req->dst(), (req->flags() & RECV), req->tag(), req->comm()};
}
}
- int rank_traced = smpi_process_index();
+ int rank_traced = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_WAITANY;
extra->send_size=count;
savedvals[i].valid=0;
}
}
- int rank_traced = smpi_process_index();
+ int rank_traced = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_WAITALL;
extra->send_size=count;
} else if (!datatype->is_valid()) {
retval = MPI_ERR_ARG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_BARRIER;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
sendtmpcount=0;
sendtmptype=recvtype;
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_GATHER;
sendtmptype=recvtype;
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
int i = 0;
int size = comm->size();
sendcount=recvcount;
sendtype=recvtype;
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_ALLGATHER;
int known = 0;
sendcount=recvcounts[comm->rank()];
sendtype=recvtype;
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
recvtype = sendtype;
recvcount = sendcount;
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_SCATTER;
recvtype = sendtype;
recvcount = sendcounts[comm->rank()];
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
int i = 0;
int size = comm->size();
} else if (!datatype->is_valid() || op == MPI_OP_NULL) {
retval = MPI_ERR_ARG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_REDUCE;
sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
}
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_ALLREDUCE;
int known = 0;
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_SCAN;
int known = 0;
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_EXSCAN;
int known = 0;
} else if (recvcounts == nullptr) {
retval = MPI_ERR_ARG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
} else {
int count = comm->size();
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_REDUCE_SCATTER;
extra->num_processes = count;
} else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_ALLTOALL;
recvdisps == nullptr) {
retval = MPI_ERR_ARG;
} else {
- int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
+ int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
}
int PMPI_Initialized(int* flag) {
- *flag=smpi_process_initialized();
+ *flag=(smpi_process()!=nullptr && smpi_process()->initialized());
return MPI_SUCCESS;
}
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
retval = win->fence(assert);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
} else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
MPI_Group group;
win->get_group(&group);
int src_traced = group->index(target_rank);
} else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
MPI_Group group;
win->get_group(&group);
int dst_traced = group->index(target_rank);
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
MPI_Group group;
win->get_group(&group);
int src_traced = group->index(target_rank);
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
retval = win->post(group,assert);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
retval = win->start(group,assert);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
retval = win->complete();
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, nullptr);
retval = win->wait();
--- /dev/null
+#include "mc/mc.h"
+#include "private.h"
+#include "simgrid/s4u/Mailbox.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
+#include "src/mc/mc_record.h"
+#include "src/mc/mc_replay.h"
+#include "src/msg/msg_private.h"
+#include "src/simix/smx_private.h"
+#include "surf/surf.h"
+#include "xbt/replay.hpp"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
+
+//TODO : replace
+extern Process **process_data;
+extern int* index_to_process_data;
+
+#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
+
+/* Format the large-message mailbox name "SMPI-<index as zero-padded hex>" into
+ * str (caller-provided buffer of at least MAILBOX_NAME_MAXLEN bytes); returns str. */
+static char *get_mailbox_name(char *str, int index)
+{
+  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int> (sizeof(int) * 2), index);
+  return str;
+}
+
+/* Format the small-message (eager protocol) mailbox name "small<index as
+ * zero-padded hex>" into str (at least MAILBOX_NAME_MAXLEN bytes); returns str. */
+static char *get_mailbox_name_small(char *str, int index)
+{
+  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int> (sizeof(int) * 2), index);
+  return str;
+}
+
+namespace simgrid{
+namespace smpi{
+
+/* Build the per-rank SMPI state: mailboxes, timer, communicators and
+ * (optionally) PAPI counters. The real index/argc/argv are attached later
+ * by set_data(); until then index_ stays MPI_UNDEFINED. */
+Process::Process(int index)
+{
+  char name[MAILBOX_NAME_MAXLEN];
+  index_ = MPI_UNDEFINED;
+  argc_ = nullptr;
+  argv_ = nullptr;
+  data_ = nullptr; // FIX: was left uninitialized; get_user_data() may run before set_user_data()
+  mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index));
+  mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index));
+  mailboxes_mutex_ = xbt_mutex_init();
+  timer_ = xbt_os_timer_new();
+  if (MC_is_active())
+    MC_ignore_heap(timer_, xbt_os_timer_size());
+  comm_self_ = MPI_COMM_NULL;
+  comm_intra_ = MPI_COMM_NULL;
+  comm_world_ = nullptr;
+  state_ = SMPI_UNINITIALIZED;
+  sampling_ = 0;
+  finalization_barrier_ = nullptr;
+  return_value_ = 0;
+
+#if HAVE_PAPI
+  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
+    // TODO: Implement host/process/thread based counters. This implementation
+    // just always takes the values passed via "default", like this:
+    // "default:COUNTER1:COUNTER2:COUNTER3;".
+    auto it = units2papi_setup.find(papi_default_config_name);
+    if (it != units2papi_setup.end()) {
+      papi_event_set_ = it->second.event_set;
+      papi_counter_data_ = it->second.counter_data;
+      XBT_DEBUG("Setting PAPI set for process %i", index); // FIX: was 'i' (leftover from the old loop), undeclared here
+    } else {
+      papi_event_set_ = PAPI_NULL;
+      XBT_DEBUG("No PAPI set for process %i", index); // FIX: same undeclared 'i'
+    }
+  }
+#endif
+}
+
+/* Attach this Process to the calling simulated actor. smpirun injects the
+ * instance id and rank as argv[1]/argv[2]; we register the rank into its
+ * deployment instance, then strip those two extra arguments so user main()
+ * sees its original command line. */
+void Process::set_data(int index, int *argc, char ***argv)
+{
+
+  char* instance_id = (*argv)[1];
+  comm_world_ = smpi_deployment_comm_world(instance_id);
+  msg_bar_t bar = smpi_deployment_finalization_barrier(instance_id);
+  if (bar!=nullptr) // don't overwrite the default one
+    finalization_barrier_ = bar;
+  index_ = index;
+  instance_id_ = instance_id;
+  replaying_ = false;
+
+  // Make this Process reachable from the SIMIX actor (this is what smpi_process() reads back)
+  static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data)->data = this;
+
+  // Shift argv left by two to hide instance id and rank; order matters: memmove first,
+  // then null the now-stale tail slots.
+  if (*argc > 3) {
+    memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
+    (*argv)[(*argc) - 1] = nullptr;
+    (*argv)[(*argc) - 2] = nullptr;
+  }
+  (*argc)-=2;
+  argc_ = argc;
+  argv_ = argv;
+  // set the process attached to the mailbox
+  mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
+  XBT_DEBUG("<%d> New process in the game: %p", index, SIMIX_process_self());
+}
+
+/* Mark this process as finalized when it leaves the game (MPI_Finalize /
+ * MPI_Abort). Does not free the Process object; SMPI_finalize does that. */
+void Process::destroy()
+{
+  if(smpi_privatize_global_variables){
+    // switch back to this rank's private data segment before touching globals
+    smpi_switch_data_segment(index_);
+  }
+  state_ = SMPI_FINALIZED;
+  XBT_DEBUG("<%d> Process left the game", index_);
+}
+
+/** @brief Prepares the current process for termination. */
+void Process::finalize()
+{
+  // This leads to an explosion of the search graph which cannot be reduced:
+  if(MC_is_active() || MC_record_replay_is_active())
+    return;
+  // wait for all pending asynchronous comms to finish
+  // NOTE(review): assumes finalization_barrier_ was set (smpirun deployment or
+  // set_finalization_barrier) — confirm the MPI_Init-without-smpirun path.
+  MSG_barrier_wait(finalization_barrier_);
+}
+
+/** @brief Check if a process is finalized */
+int Process::finalized()
+{
+  // An unattached process (index still MPI_UNDEFINED) is never finalized.
+  return (index_ != MPI_UNDEFINED) && (state_ == SMPI_FINALIZED);
+}
+
+/** @brief Check if a process is initialized */
+int Process::initialized()
+{
+  // Before the global index table exists, nobody can be initialized.
+  return index_to_process_data != nullptr && index_ != MPI_UNDEFINED && state_ == SMPI_INITIALIZED;
+}
+
+/** @brief Mark a process as initialized (=MPI_Init called) */
+void Process::mark_as_initialized()
+{
+  // No-op on an unattached process or after finalization.
+  if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
+    state_ = SMPI_INITIALIZED;
+}
+
+// Toggle trace-replay mode; ignored on an unattached or finalized process.
+void Process::set_replaying(bool value){
+  if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
+    replaying_ = value;
+}
+
+// Is this process currently replaying a time-independent trace?
+// An unattached process (index MPI_UNDEFINED) never replays.
+bool Process::replaying(){
+  return index_ != MPI_UNDEFINED ? replaying_ : false;
+}
+
+// Store an opaque user pointer on this process (MPI attribute-style storage).
+void Process::set_user_data(void *data)
+{
+  data_ = data;
+}
+
+// Retrieve the opaque user pointer set by set_user_data().
+void *Process::get_user_data()
+{
+  return data_;
+}
+
+
+/**
+ * \brief Returns a structure that stores the location (filename + linenumber)
+ * of the last calls to MPI_* functions.
+ *
+ * \see smpi_trace_set_call_location
+ */
+smpi_trace_call_location_t* Process::call_location()
+{
+  return &trace_call_loc_;
+}
+
+// Global rank of this process, or MPI_UNDEFINED before set_data() ran.
+int Process::index()
+{
+  return index_;
+}
+
+// MPI_COMM_WORLD of this instance; MPI_COMM_NULL if no instance was registered.
+MPI_Comm Process::comm_world()
+{
+  return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
+}
+
+// Underlying SIMIX mailbox used for large messages (rendez-vous protocol).
+smx_mailbox_t Process::mailbox()
+{
+  return mailbox_->getImpl();
+}
+
+// Underlying SIMIX mailbox used for small messages (eager protocol).
+smx_mailbox_t Process::mailbox_small()
+{
+  return mailbox_small_->getImpl();
+}
+
+// Mutex protecting concurrent accesses to this process' mailboxes.
+xbt_mutex_t Process::mailboxes_mutex()
+{
+  return mailboxes_mutex_;
+}
+
+#if HAVE_PAPI
+// PAPI event set attached to this process.
+int Process::papi_event_set(void)
+{
+  return papi_event_set_;
+}
+
+// FIX: was defined as free function 'smpi_process_papi_counters', but it reads
+// the member papi_counter_data_ and the class declares Process::papi_counters()
+// — the mismatched definition could not compile nor satisfy the declaration.
+papi_counter_t& Process::papi_counters(void)
+{
+  return papi_counter_data_;
+}
+#endif
+
+// OS-level timer used by the benchmarking macros (SMPI_SAMPLE_*).
+xbt_os_timer_t Process::timer()
+{
+  return timer_;
+}
+
+// Record the simulation clock; paired with simulated_elapsed().
+void Process::simulated_start()
+{
+  simulated_ = SIMIX_get_clock();
+}
+
+// Simulated seconds elapsed since the last simulated_start().
+double Process::simulated_elapsed()
+{
+  return SIMIX_get_clock() - simulated_;
+}
+
+// MPI_COMM_SELF for this process, created lazily on first use.
+MPI_Comm Process::comm_self()
+{
+  if(comm_self_==MPI_COMM_NULL){
+    MPI_Group group = new  Group(1);
+    comm_self_ = new  Comm(group, nullptr);
+    group->set_mapping(index_, 0);
+  }
+  return comm_self_;
+}
+
+// Intra-node communicator (used by SMP-aware collectives); may be MPI_COMM_NULL.
+MPI_Comm Process::comm_intra()
+{
+  return comm_intra_;
+}
+
+void Process::set_comm_intra(MPI_Comm comm)
+{
+  comm_intra_ = comm;
+}
+
+// Flag whether we are inside an SMPI_SAMPLE_ block (benchmarking skipped).
+void Process::set_sampling(int s)
+{
+  sampling_ = s;
+}
+
+int Process::sampling()
+{
+  return sampling_;
+}
+
+// Barrier all ranks of the instance wait on in finalize().
+void Process::set_finalization_barrier(msg_bar_t bar){
+  finalization_barrier_=bar;
+}
+
+msg_bar_t Process::finalization_barrier(){
+  return finalization_barrier_;
+}
+
+// Exit code of the user's main(); first non-zero value wins globally.
+int Process::return_value(){
+  return return_value_;
+}
+
+void Process::set_return_value(int val){
+  return_value_=val;
+}
+
+/* Entry point invoked from MPI_Init: binds the calling SIMIX actor to its
+ * Process slot, registers the rank in its deployment instance, and (if
+ * enabled) switches to the rank's private global-variable segment. */
+void Process::init(int *argc, char ***argv){
+
+  if (process_data == nullptr){
+    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
+    exit(1);
+  }
+  if (argc != nullptr && argv != nullptr) {
+    smx_actor_t proc = SIMIX_process_self();
+    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
+
+    int index = smpi_process_index_of_smx_process(proc);
+
+    if(index_to_process_data == nullptr){
+      index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
+    }
+
+    // smpirun passes the instance id and rank as the two first arguments
+    char* instance_id = (*argv)[1];
+    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
+    smpi_deployment_register_process(instance_id, rank, index);
+
+    if(smpi_privatize_global_variables){
+      /* Now using segment index of the process */
+      index = proc->segment_index;
+      /* Done at the process's creation */
+      SMPI_switch_data_segment(index);
+    }
+
+    Process* process = smpi_process_remote(index);
+    process->set_data(index, argc, argv);
+  }
+  xbt_assert(smpi_process(),
+      "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
+      "Although it's required by MPI-2, this is currently not supported by SMPI.");
+}
+
+}
+}
--- /dev/null
+/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef SMPI_PROCESS_HPP
+#define SMPI_PROCESS_HPP
+
+
+#include <xbt/base.h>
+#include "src/instr/instr_smpi.h"
+#include "private.h"
+#include "simgrid/s4u/Mailbox.hpp"
+
+namespace simgrid{
+namespace smpi{
+
+/** @brief Per-rank SMPI state attached to each simulated MPI process. */
+class Process {
+  private:
+    double simulated_;                      // simulation clock at simulated_start()
+    int *argc_;
+    char ***argv_;
+    simgrid::s4u::MailboxPtr mailbox_;       // large messages (rendez-vous)
+    simgrid::s4u::MailboxPtr mailbox_small_; // small messages (eager)
+    xbt_mutex_t mailboxes_mutex_;
+    xbt_os_timer_t timer_;
+    MPI_Comm comm_self_;                    // lazily created by comm_self()
+    MPI_Comm comm_intra_;
+    MPI_Comm* comm_world_;                  // instance's world; nullptr if none
+    void *data_;                            /* user data */
+    int index_;                             // global rank; MPI_UNDEFINED until set_data()
+    char state_;                            // SMPI_UNINITIALIZED/INITIALIZED/FINALIZED
+    int sampling_;                          /* inside an SMPI_SAMPLE_ block? */
+    char* instance_id_;
+    bool replaying_;                        /* is the process replaying a trace */
+    msg_bar_t finalization_barrier_;
+    int return_value_;                      // exit code of the user's main()
+    smpi_trace_call_location_t trace_call_loc_;
+#if HAVE_PAPI
+    /** Contains hardware data as read by PAPI **/
+    int papi_event_set_;
+    papi_counter_t papi_counter_data_;
+#endif
+  public:
+    Process(int index);
+    void destroy();
+    void set_data(int index, int *argc, char ***argv);
+    void finalize();
+    int finalized();
+    int initialized();
+    void mark_as_initialized();
+    void set_replaying(bool value);
+    bool replaying();
+    void set_user_data(void *data);
+    void *get_user_data();
+    smpi_trace_call_location_t* call_location();
+    int index();
+    MPI_Comm comm_world();
+    smx_mailbox_t mailbox();
+    smx_mailbox_t mailbox_small();
+    xbt_mutex_t mailboxes_mutex();
+    #if HAVE_PAPI
+    int papi_event_set(void);
+    papi_counter_t& papi_counters(void);
+    #endif
+    xbt_os_timer_t timer();
+    void simulated_start();
+    double simulated_elapsed();
+    MPI_Comm comm_self();
+    MPI_Comm comm_intra();
+    void set_comm_intra(MPI_Comm comm);
+    void set_sampling(int s);
+    int sampling();
+    msg_bar_t finalization_barrier();
+    void set_finalization_barrier(msg_bar_t bar);
+    int return_value();
+    void set_return_value(int val);
+    // Static entry point called from MPI_Init; binds the caller to its slot.
+    static void init(int *argc, char ***argv);
+};
+
+
+}
+}
+
+#endif
static void log_timed_action (const char *const *action, double clock){
if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
char *name = xbt_str_join_array(action, " ");
- XBT_VERB("%s %f", name, smpi_process_simulated_elapsed()-clock);
+ XBT_VERB("%s %f", name, smpi_process()->simulated_elapsed()-clock);
xbt_free(name);
}
}
static std::vector<MPI_Request>* get_reqq_self()
{
- return reqq.at(smpi_process_index());
+ return reqq.at(smpi_process()->index());
}
static void set_reqq_self(std::vector<MPI_Request> *mpi_request)
{
- reqq.insert({smpi_process_index(), mpi_request});
+ reqq.insert({smpi_process()->index(), mpi_request});
}
//allocate a single buffer for all sends, growing it if needed
void* smpi_get_tmp_sendbuffer(int size)
{
- if (!smpi_process_get_replaying())
+ if (!smpi_process()->replaying())
return xbt_malloc(size);
if (sendbuffer_size<size){
sendbuffer=static_cast<char*>(xbt_realloc(sendbuffer,size));
//allocate a single buffer for all recv
void* smpi_get_tmp_recvbuffer(int size){
- if (!smpi_process_get_replaying())
+ if (!smpi_process()->replaying())
return xbt_malloc(size);
if (recvbuffer_size<size){
recvbuffer=static_cast<char*>(xbt_realloc(recvbuffer,size));
}
void smpi_free_tmp_buffer(void* buf){
- if (!smpi_process_get_replaying())
+ if (!smpi_process()->replaying())
xbt_free(buf);
}
else MPI_DEFAULT_TYPE= MPI_BYTE; // default TAU datatype
/* start a simulated timer */
- smpi_process_simulated_start();
+ smpi_process()->simulated_start();
/*initialize the number of active processes */
active_processes = smpi_process_count();
static void action_comm_size(const char *const *action)
{
communicator_size = parse_double(action[2]);
- log_timed_action (action, smpi_process_simulated_elapsed());
+ log_timed_action (action, smpi_process()->simulated_elapsed());
}
static void action_comm_split(const char *const *action)
{
- log_timed_action (action, smpi_process_simulated_elapsed());
+ log_timed_action (action, smpi_process()->simulated_elapsed());
}
static void action_comm_dup(const char *const *action)
{
- log_timed_action (action, smpi_process_simulated_elapsed());
+ log_timed_action (action, smpi_process()->simulated_elapsed());
}
static void action_compute(const char *const *action)
{
CHECK_ACTION_PARAMS(action, 1, 0)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
double flops= parse_double(action[2]);
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type=TRACING_COMPUTING;
extra->comp_size=flops;
CHECK_ACTION_PARAMS(action, 2, 1)
int to = atoi(action[2]);
double size=parse_double(action[3]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
if(action[4])
MPI_CURRENT_TYPE=decode_datatype(action[4]);
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int dst_traced = MPI_COMM_WORLD->group()->rank(to);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
CHECK_ACTION_PARAMS(action, 2, 1)
int to = atoi(action[2]);
double size=parse_double(action[3]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
if(action[4])
MPI_CURRENT_TYPE=decode_datatype(action[4]);
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int dst_traced = MPI_COMM_WORLD->group()->rank(to);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ISEND;
CHECK_ACTION_PARAMS(action, 2, 1)
int from = atoi(action[2]);
double size=parse_double(action[3]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
MPI_Status status;
if(action[4])
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int src_traced = MPI_COMM_WORLD->group()->rank(from);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
CHECK_ACTION_PARAMS(action, 2, 1)
int from = atoi(action[2]);
double size=parse_double(action[3]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
if(action[4])
MPI_CURRENT_TYPE=decode_datatype(action[4]);
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int src_traced = MPI_COMM_WORLD->group()->rank(from);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_IRECV;
static void action_test(const char *const *action){
CHECK_ACTION_PARAMS(action, 0, 0)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
MPI_Status status;
MPI_Request request = get_reqq_self()->back();
//Different times in traced application and replayed version may lead to this
//In this case, ignore the extra calls.
if(request!=nullptr){
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type=TRACING_TEST;
TRACE_smpi_testing_in(rank, extra);
static void action_wait(const char *const *action){
CHECK_ACTION_PARAMS(action, 0, 0)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
MPI_Status status;
xbt_assert(get_reqq_self()->size(), "action wait not preceded by any irecv or isend: %s",
static void action_waitall(const char *const *action){
CHECK_ACTION_PARAMS(action, 0, 0)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
unsigned int count_requests=get_reqq_self()->size();
if (count_requests>0) {
MPI_Status status[count_requests];
- int rank_traced = smpi_process_index();
+ int rank_traced = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_WAITALL;
extra->send_size=count_requests;
}
static void action_barrier(const char *const *action){
- double clock = smpi_process_simulated_elapsed();
- int rank = smpi_process_index();
+ double clock = smpi_process()->simulated_elapsed();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_BARRIER;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
{
CHECK_ACTION_PARAMS(action, 1, 2)
double size = parse_double(action[2]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int root=0;
/* Initialize MPI_CURRENT_TYPE in order to decrease the number of the checks */
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE=decode_datatype(action[4]);
}
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int root_traced = MPI_COMM_WORLD->group()->index(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
CHECK_ACTION_PARAMS(action, 2, 2)
double comm_size = parse_double(action[2]);
double comp_size = parse_double(action[3]);
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int root=0;
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
MPI_CURRENT_TYPE=decode_datatype(action[5]);
}
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int root_traced = MPI_COMM_WORLD->group()->rank(root);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_REDUCE;
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- double clock = smpi_process_simulated_elapsed();
- int rank = smpi_process_index();
+ double clock = smpi_process()->simulated_elapsed();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ALLREDUCE;
extra->send_size = comm_size;
static void action_allToAll(const char *const *action) {
CHECK_ACTION_PARAMS(action, 2, 2) //two mandatory (send and recv volumes) and two optional (corresponding datatypes)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
int send_size = parse_double(action[2]);
int recv_size = parse_double(action[3]);
void *send = smpi_get_tmp_sendbuffer(send_size*comm_size* MPI_CURRENT_TYPE->size());
void *recv = smpi_get_tmp_recvbuffer(recv_size*comm_size* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ALLTOALL;
extra->send_size = send_size;
5) 0 is the recv datatype id, see decode_datatype()
*/
CHECK_ACTION_PARAMS(action, 2, 3)
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
int send_size = parse_double(action[2]);
int recv_size = parse_double(action[3]);
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2, nullptr);
- TRACE_smpi_collective_in(smpi_process_index(), root, __FUNCTION__, extra);
+ TRACE_smpi_collective_in(smpi_process()->index(), root, __FUNCTION__, extra);
Colls::gather(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_collective_out(smpi_process_index(), -1, __FUNCTION__);
+ TRACE_smpi_collective_out(smpi_process()->index(), -1, __FUNCTION__);
log_timed_action (action, clock);
}
4) 0 is the send datatype id, see decode_datatype()
5) 0 is the recv datatype id, see decode_datatype()
*/
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size+1, 2)
int send_size = parse_double(action[2]);
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2, nullptr);
- TRACE_smpi_collective_in(smpi_process_index(), root, __FUNCTION__, extra);
+ TRACE_smpi_collective_in(smpi_process()->index(), root, __FUNCTION__, extra);
Colls::gatherv(send, send_size, MPI_CURRENT_TYPE, recv, recvcounts, disps, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_collective_out(smpi_process_index(), -1, __FUNCTION__);
+ TRACE_smpi_collective_out(smpi_process()->index(), -1, __FUNCTION__);
log_timed_action (action, clock);
}
2) The value 11346849 is the number of instructions
3) The last value corresponds to the datatype, see decode_datatype().
*/
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size+1, 1)
int comp_size = parse_double(action[2+comm_size]);
int recvcounts[comm_size];
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int size = 0;
if(action[3+comm_size])
MPI_CURRENT_TYPE=decode_datatype(action[3+comm_size]);
2) 275427 is the recvcount
3) No more values mean that the datatype for sent and receive buffer is the default one, see decode_datatype().
*/
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
CHECK_ACTION_PARAMS(action, 2, 2)
int sendcount=atoi(action[2]);
void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* MPI_CURRENT_TYPE->size());
void *recvbuf = smpi_get_tmp_recvbuffer(recvcount* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ALLGATHER;
extra->send_size = sendcount;
2) The next four elements declare the recvcounts array
3) No more values mean that the datatype for sent and receive buffer is the default one, see decode_datatype().
*/
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, comm_size+1, 2)
}
void *recvbuf = smpi_get_tmp_recvbuffer(recv_sum* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ALLGATHERV;
extra->send_size = sendcount;
3) 100*sizeof(int) is the size of the receiver buffer
4) 1 70 10 5 is the recvcounts array
*/
- double clock = smpi_process_simulated_elapsed();
+ double clock = smpi_process()->simulated_elapsed();
int comm_size = MPI_COMM_WORLD->size();
CHECK_ACTION_PARAMS(action, 2*comm_size+2, 2)
recvdisps[i] = 0;
}
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
extra->type = TRACING_ALLTOALLV;
extra->recvcounts= xbt_new(int, comm_size);
void smpi_replay_run(int *argc, char***argv){
/* First initializes everything */
- smpi_process_init(argc, argv);
- smpi_process_mark_as_initialized();
- smpi_process_set_replaying(true);
+ Process::init(argc, argv);
+ smpi_process()->mark_as_initialized();
+ smpi_process()->set_replaying(true);
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
TRACE_smpi_init(rank);
TRACE_smpi_computing_init(rank);
instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
if(active_processes==0){
/* Last process alive speaking: end the simulated timer */
- XBT_INFO("Simulation time %f", smpi_process_simulated_elapsed());
+ XBT_INFO("Simulation time %f", smpi_process()->simulated_elapsed());
xbt_free(sendbuffer);
xbt_free(recvbuffer);
}
operation =bprintf("%s_finalize",__FUNCTION__);
TRACE_smpi_collective_in(rank, -1, operation, extra_fin);
- smpi_process_finalize();
+ smpi_process()->finalize();
TRACE_smpi_collective_out(rank, -1, operation);
- TRACE_smpi_finalize(smpi_process_index());
- smpi_process_destroy();
+ TRACE_smpi_finalize(smpi_process()->index());
+ smpi_process()->destroy();
xbt_free(operation);
}
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag, comm, PERSISTENT | SEND | PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process_index(),
+ return new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
}
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src),
- smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
+ smpi_process()->index(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(src), smpi_process_index(), tag,
+ comm->group()->index(src), smpi_process()->index(), tag,
comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
request->start();
return request;
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND);
request->start();
return request;
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(src), smpi_process_index(), tag, comm,
+ comm->group()->index(src), smpi_process()->index(), tag, comm,
NON_PERSISTENT | RECV);
request->start();
return request;
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag, comm, NON_PERSISTENT | SEND);
request->start();
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
comm->group()->index(dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
request->start();
{
MPI_Request requests[2];
MPI_Status stats[2];
- int myid=smpi_process_index();
+ int myid=smpi_process()->index();
if ((comm->group()->index(dst) == myid) && (comm->group()->index(src) == myid)){
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
return;
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut = smpi_process_mailboxes_mutex();
+ xbt_mutex_t mut = smpi_process()->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
- mailbox = smpi_process_mailbox();
+ mailbox = smpi_process()->mailbox();
}
else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
//We have to check both mailboxes (because SSEND messages are sent to the large mbox).
//begin with the more appropriate one : the small one.
- mailbox = smpi_process_mailbox_small();
+ mailbox = smpi_process()->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv,
static_cast<void*>(this));
if (action == nullptr) {
- mailbox = smpi_process_mailbox();
+ mailbox = smpi_process()->mailbox();
XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
- mailbox = smpi_process_mailbox_small();
+ mailbox = smpi_process()->mailbox_small();
}
} else {
XBT_DEBUG("yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process_mailbox_small();
+ mailbox = smpi_process()->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, nothing in the permanent receive mailbox");
- mailbox = smpi_process_mailbox();
+ mailbox = smpi_process()->mailbox();
} else {
XBT_DEBUG("yes there was something for us in the small mailbox");
}
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
action_ = simcall_comm_irecv(SIMIX_process_self(), mailbox, buf_, &real_size_, &match_recv,
- ! smpi_process_get_replaying()? smpi_comm_copy_data_callback
+ ! smpi_process()->replaying()? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
refcount_++;
if(!(old_type_->flags() & DT_FLAG_DERIVED)){
oldbuf = buf_;
- if (!smpi_process_get_replaying() && oldbuf != nullptr && size_!=0){
+ if (!smpi_process()->replaying() && oldbuf != nullptr && size_!=0){
if((smpi_privatize_global_variables != 0)
&& (static_cast<char*>(buf_) >= smpi_start_data_exe)
&& (static_cast<char*>(buf_) < smpi_start_data_exe + smpi_size_data_exe )){
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut=smpi_process_remote_mailboxes_mutex(receiver);
+ xbt_mutex_t mut=smpi_process_remote(receiver)->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (!(async_small_thresh != 0 || (flags_ & RMA) !=0)) {
- mailbox = smpi_process_remote_mailbox(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox();
} else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
- mailbox = smpi_process_remote_mailbox(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox();
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send,
static_cast<void*>(this));
if (action == nullptr) {
if ((flags_ & SSEND) == 0){
- mailbox = smpi_process_remote_mailbox_small(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox_small();
XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
} else {
- mailbox = smpi_process_remote_mailbox_small(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox_small();
XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, we are first, send to large mailbox");
- mailbox = smpi_process_remote_mailbox(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox();
}
}
} else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process_remote_mailbox(receiver);
+ mailbox = smpi_process_remote(receiver)->mailbox();
XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, this,buf_);
}
action_ = simcall_comm_isend(SIMIX_process_from_PID(src_+1), mailbox, size_, -1.0,
buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
- !smpi_process_get_replaying() ? smpi_comm_copy_data_callback
+ !smpi_process()->replaying() ? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
detached_);
request->print_request("New iprobe");
// We have to test both mailboxes as we don't know on which one we will receive
if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
- mailbox = smpi_process_mailbox_small();
+ mailbox = smpi_process()->mailbox_small();
XBT_DEBUG("Trying to probe the perm recv mailbox");
request->action_ = simcall_comm_iprobe(mailbox, 0, request->src_, request->tag_, &match_recv,
static_cast<void*>(request));
}
if (request->action_ == nullptr){
- mailbox = smpi_process_mailbox();
+ mailbox = smpi_process()->mailbox();
XBT_DEBUG("trying to probe the other mailbox");
request->action_ = simcall_comm_iprobe(mailbox, 0, request->src_,request->tag_, &match_recv,
static_cast<void*>(request));
MPI_Datatype datatype = req->old_type_;
if(((req->flags_ & ACCUMULATE) != 0) || (datatype->flags() & DT_FLAG_DERIVED)){
- if (!smpi_process_get_replaying()){
+ if (!smpi_process()->replaying()){
if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf_) >= smpi_start_data_exe)
&& ((char*)req->old_buf_ < smpi_start_data_exe + smpi_size_data_exe )){
XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process()->index());
}
}
}
if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
- int rank = smpi_process_index();
+ int rank = smpi_process()->index();
int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
TRACE_smpi_recv(rank, src_traced, rank,req->tag_);
}
if(target_rank != comm_->rank()){
//prepare send_request
- MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process_index(),
+ MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process()->index(),
comm_->group()->index(target_rank), SMPI_RMA_TAG+1, comm_, MPI_OP_NULL);
//prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process_index(),
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
//push request to receiver's win
if(target_rank != comm_->rank()){
//prepare send_request
MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- comm_->group()->index(target_rank), smpi_process_index(), SMPI_RMA_TAG+2, send_win->comm_,
+ comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, send_win->comm_,
MPI_OP_NULL);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- comm_->group()->index(target_rank), smpi_process_index(), SMPI_RMA_TAG+2, comm_,
+ comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, comm_,
MPI_OP_NULL);
//start the send, with another process than us as sender.
//As the tag will be used for ordering of the operations, add count to it
//prepare send_request
MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype,
- smpi_process_index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, comm_, op);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype,
- smpi_process_index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, recv_win->comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, recv_win->comm_, op);
count_++;
//push request to receiver's win
while (j != size) {
int src = group->index(j);
- if (src != smpi_process_index() && src != MPI_UNDEFINED) {
+ if (src != smpi_process()->index() && src != MPI_UNDEFINED) {
reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD);
i++;
}
while(j!=size){
int dst=group->index(j);
- if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){
+ if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD);
i++;
}
while(j!=size){
int dst=group_->index(j);
- if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){
+ if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;
}
while(j!=size){
int src=group_->index(j);
- if(src!=smpi_process_index() && src!=MPI_UNDEFINED){
+ if(src!=smpi_process()->index() && src!=MPI_UNDEFINED){
reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;
}
src/smpi/smpi_datatype_derived.hpp
src/smpi/smpi_op.cpp
src/smpi/smpi_op.hpp
+ src/smpi/smpi_process.cpp
+ src/smpi/smpi_process.hpp
src/smpi/smpi_pmpi.cpp
src/smpi/smpi_replay.cpp
src/smpi/smpi_request.cpp