/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "mc/mc.h"
#include "private.h"
#include "private.hpp"
#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/sg_config.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "xbt/replay.h"

#include <float.h> /* DBL_MAX */
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");

#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
const char* papi_default_config_name = "default";

struct papi_process_data {
  papi_counter_t counter_data;
  int event_set;
};

std::unordered_map<std::string, double> location2speedup;
typedef struct s_smpi_process_data {
  double simulated;
  int *argc;
  char ***argv;
  simgrid::s4u::MailboxPtr mailbox;
  simgrid::s4u::MailboxPtr mailbox_small;
  xbt_mutex_t mailboxes_mutex;
  xbt_os_timer_t timer;
  MPI_Comm comm_self;
  MPI_Comm comm_intra;
  MPI_Comm* comm_world;
  void *data; /* user data */
  int index;
  char state;
  int sampling; /* inside an SMPI_SAMPLE_ block? */
  char* instance_id;
  bool replaying; /* is the process replaying a trace */
  msg_bar_t finalization_barrier;
  int return_value;
  smpi_trace_call_location_t trace_call_loc;

  /** Contains hardware data as read by PAPI **/
  int papi_event_set;
  papi_counter_t papi_counter_data;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
extern xbt_dict_t smpi_comm_keyvals;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;

void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;

#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
static char *get_mailbox_name(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int> (sizeof(int) * 2), index);
  return str;
}

static char *get_mailbox_name_small(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int> (sizeof(int) * 2), index);
  return str;
}
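
/* Each rank thus owns two mailboxes, named after its index printed as
 * zero-padded hex over sizeof(int)*2 digits: with 4-byte ints, rank 11 gets
 * "SMPI-0000000b" and "small0000000b" (the latter used for short messages,
 * cf. the smpi/async-small-thresh option checked in smpi_check_options()). */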
void smpi_process_init(int *argc, char ***argv)
{
  if (process_data == nullptr){
    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
    exit(1);
  }
  if (argc != nullptr && argv != nullptr) {
    smx_actor_t proc = SIMIX_process_self();
    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    int index = smpi_process_index_of_smx_process(proc);

    if(index_to_process_data == nullptr){
      index_to_process_data = static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
    }

    if(smpi_privatize_global_variables){
      /* Now using the segment index of the process */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
    }
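    /* Summary (not normative): with smpi/privatize-global-variables, each
     * rank owns a private copy of the executable's global data, and SMPI
     * remaps that segment whenever another rank runs. The segment index
     * therefore replaces the plain process index here; buffers located in
     * this region also need the special copy logic of
     * smpi_comm_copy_buffer_callback() below. */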
    MPI_Comm* temp_comm_world;
    msg_bar_t temp_bar;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    smpi_process_data_t data = smpi_process_remote_data(index);
    data->comm_world = temp_comm_world;
    if(temp_bar != nullptr)
      data->finalization_barrier = temp_bar;
    data->index = index;
    data->instance_id = instance_id;
    data->replaying = false;

    static_cast<simgrid::MsgActorExt*>(proc->data)->data = data;

    if (*argc > 3) {
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
      (*argv)[(*argc) - 1] = nullptr;
      (*argv)[(*argc) - 2] = nullptr;
    }
    (*argc) -= 2;
    data->argc = argc;
    data->argv = argv;
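    /* The deployment generated by smpirun passes <instance_id> and <rank>
     * as the first two application arguments; they are consumed above so
     * that user code only sees its own arguments. */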
    // set the process attached to the mailbox
    data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  }
  xbt_assert(smpi_process_data(),
      "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
      "Although it's required by MPI-2, this is currently not supported by SMPI.");
}
void smpi_process_destroy()
{
  int index = smpi_process_index();
  if(smpi_privatize_global_variables){
    smpi_switch_data_segment(index);
  }
  process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
  XBT_DEBUG("<%d> Process left the game", index);
}
/** @brief Prepares the current process for termination. */
void smpi_process_finalize()
{
  // This leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
    return;

  int index = smpi_process_index();
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
}
/** @brief Check if a process is finalized */
int smpi_process_finalized()
{
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);
  else
    return 0;
}
/** @brief Check if a process is initialized */
int smpi_process_initialized()
{
  if (index_to_process_data == nullptr){
    return false;
  }
  int index = smpi_process_index();
  return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));
}
/** @brief Mark a process as initialized (=MPI_Init called) */
void smpi_process_mark_as_initialized()
{
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;
}
void smpi_process_set_replaying(bool value){
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->replaying = value;
}

bool smpi_process_get_replaying(){
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return process_data[index_to_process_data[index]]->replaying;
  else
    return (_xbt_replay_is_active() != 0);
}
int smpi_global_size()
{
  char *value = getenv("SMPI_GLOBAL_SIZE");
  xbt_assert(value, "Please set env var SMPI_GLOBAL_SIZE to the expected number of processes.");

  return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s");
}
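
/* Hypothetical usage of the helper above:
 *   SMPI_GLOBAL_SIZE=64 ./my_tool
 * makes smpi_global_size() return 64. */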
smpi_process_data_t smpi_process_data()
{
  simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
  return static_cast<smpi_process_data_t>(msgExt->data);
}

smpi_process_data_t smpi_process_remote_data(int index)
{
  return process_data[index_to_process_data[index]];
}

void smpi_process_set_user_data(void *data)
{
  smpi_process_data_t process_data = smpi_process_data();
  process_data->data = data;
}

void *smpi_process_get_user_data()
{
  smpi_process_data_t process_data = smpi_process_data();
  return process_data->data;
}

int smpi_process_count()
{
  return process_count;
}
/**
 * \brief Returns a structure that stores the location (filename + linenumber)
 *        of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* smpi_process_get_call_location()
{
  smpi_process_data_t process_data = smpi_process_data();
  return &process_data->trace_call_loc;
}
int smpi_process_index()
{
  smpi_process_data_t data = smpi_process_data();
  //return MPI_UNDEFINED if not initialized
  return data != nullptr ? data->index : MPI_UNDEFINED;
}

MPI_Comm smpi_process_comm_world()
{
  smpi_process_data_t data = smpi_process_data();
  //return MPI_COMM_NULL if not initialized
  return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
}
smx_mailbox_t smpi_process_mailbox()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox->getImpl();
}

smx_mailbox_t smpi_process_mailbox_small()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox_small->getImpl();
}

xbt_mutex_t smpi_process_mailboxes_mutex()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailboxes_mutex;
}

smx_mailbox_t smpi_process_remote_mailbox(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox->getImpl();
}

smx_mailbox_t smpi_process_remote_mailbox_small(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox_small->getImpl();
}

xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailboxes_mutex;
}
int smpi_process_papi_event_set(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_event_set;
}

papi_counter_t& smpi_process_papi_counters(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_counter_data;
}

xbt_os_timer_t smpi_process_timer()
{
  smpi_process_data_t data = smpi_process_data();
  return data->timer;
}
void smpi_process_simulated_start()
{
  smpi_process_data_t data = smpi_process_data();
  data->simulated = SIMIX_get_clock();
}

double smpi_process_simulated_elapsed()
{
  smpi_process_data_t data = smpi_process_data();
  return SIMIX_get_clock() - data->simulated;
}
MPI_Comm smpi_process_comm_self()
{
  smpi_process_data_t data = smpi_process_data();
  if(data->comm_self==MPI_COMM_NULL){
    MPI_Group group = new Group(1);
    data->comm_self = new Comm(group, nullptr);
    group->set_mapping(smpi_process_index(), 0);
  }
  return data->comm_self;
}

MPI_Comm smpi_process_get_comm_intra()
{
  smpi_process_data_t data = smpi_process_data();
  return data->comm_intra;
}

void smpi_process_set_comm_intra(MPI_Comm comm)
{
  smpi_process_data_t data = smpi_process_data();
  data->comm_intra = comm;
}
void smpi_process_set_sampling(int s)
{
  smpi_process_data_t data = smpi_process_data();
  data->sampling = s;
}

int smpi_process_get_sampling()
{
  smpi_process_data_t data = smpi_process_data();
  return data->sampling;
}

void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
{
  smpi_comm_copy_data_callback = callback;
}
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff = buff;
  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);

  if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
    ){
    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");

    smpi_switch_data_segment(
        (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index));
    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
    memcpy(tmpbuff, buff, buff_size);
  }

  if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
    smpi_switch_data_segment(
        (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index));
  }

  memcpy(comm->dst_buff, tmpbuff, buff_size);
  if (comm->detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    //It seems the request is still used after this call, so it should be freed somewhere else, but where?
    //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
    comm->src_buff = nullptr;
  }

  if(tmpbuff != buff)
    xbt_free(tmpbuff);
}
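
/* Note on the privatization dance above: when global variables are
 * privatized, the source and destination buffers may both live in the
 * executable's data segment, which is remapped per rank. A source buffer in
 * that range is only readable while the sender's segment is mapped, hence
 * the copy into tmpbuff before switching to the receiver's segment. */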
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
{
  /* nothing done in this version */
}
static void smpi_check_options(){
  //check correctness of MPI parameters

  xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));

  if (xbt_cfg_is_default_value("smpi/host-speed")) {
    XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
             "Use the option \"--cfg=smpi/host-speed:<flops>\" to set its value. "
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
  }

  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >= 0,
             "The 'smpi/cpu-threshold' option cannot have negative values [anymore]. If you want to discard "
             "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");
}
int smpi_enabled() {
  return process_data != nullptr;
}
void smpi_global_init()
{
  int i;
  MPI_Group group;
  char name[MAILBOX_NAME_MAXLEN];
  int smpirun = 0;

  if (!MC_is_active()) {
    global_timer = xbt_os_timer_new();
    xbt_os_walltimer_start(global_timer);
  }

  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
    std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
    std::ifstream fstream(filename);
    if (!fstream.is_open()) {
      xbt_die("Could not open file %s. Does it exist?", filename.c_str());
    }

    std::string line;
    typedef boost::tokenizer<boost::escaped_list_separator<char>> Tokenizer;
    std::getline(fstream, line); // Skip the header line
    while (std::getline(fstream, line)) {
      Tokenizer tok(line);
      Tokenizer::iterator it  = tok.begin();
      Tokenizer::iterator end = std::next(tok.begin());

      std::string location = *it;
      boost::trim(location);
      location2speedup.insert(std::pair<std::string, double>(location, std::stod(*end)));
    }
  }
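  /* A hypothetical comp-adjustment-file (CSV; first column is the trimmed
   * call location, second the speedup factor; the header line is skipped):
   *
   *   location,speedup
   *   mycode.c:42,1.5
   *   mycode.c:133,0.9
   */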
  // This map holds for each computation unit (such as "default" or "process1" etc.)
  // the configuration as given by the user (counter data as a pair of (counter_name, counter_counter))
  // and the (computed) event_set.
  std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;

  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
      XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
                " Expected version is %i", PAPI_VER_CURRENT);

    typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
    boost::char_separator<char> separator_units(";");
    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
    Tokenizer tokens(str, separator_units);

    // Iterate over all the computational units. This could be
    // processes, hosts, threads, ranks... You name it. I'm not exactly
    // sure what we will support eventually, so I'll leave it at the
    // general term "units".
    for (auto& unit_it : tokens) {
      boost::char_separator<char> separator_events(":");
      Tokenizer event_tokens(unit_it, separator_events);

      int event_set = PAPI_NULL;
      if (PAPI_create_eventset(&event_set) != PAPI_OK) {
        // TODO: Should this let the whole simulation die?
        XBT_CRITICAL("Could not create PAPI event set during init.");
      }

      // NOTE: We cannot use a map here, as we must obey the order of the counters.
      // This is important for PAPI: We need to map the values of counters back
      // to the event_names (so, when PAPI_read() has finished)!
      papi_counter_t counters2values;

      // Iterate over all counters that were specified for this specific
      // unit.
      // Note that we need to remove the name of the unit
      // (that could also be the "default" value), which always comes first.
      // Hence, we start at ++(events.begin())!
      for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); events_it++) {
        int event_code   = PAPI_NULL;
        char* event_name = const_cast<char*>((*events_it).c_str());
        if (PAPI_event_name_to_code(event_name, &event_code) == PAPI_OK) {
          if (PAPI_add_event(event_set, event_code) != PAPI_OK) {
            XBT_ERROR("Could not add PAPI event '%s'. Skipping.", event_name);
            continue;
          } else {
            XBT_DEBUG("Successfully added PAPI event '%s' to the event set.", event_name);
          }
        } else {
          XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);
          continue;
        }

        counters2values.push_back(
            // We cannot just pass *events_it, as this is of type const basic_string
            std::make_pair<std::string, long long>(std::string(*events_it), 0));
      }

      std::string unit_name = *(event_tokens.begin());
      papi_process_data config = {.counter_data = std::move(counters2values), .event_set = event_set};

      units2papi_setup.insert(std::make_pair(unit_name, std::move(config)));
    }
  }
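  /* For reference, the format consumed above looks like
   *   "default:PAPI_TOT_INS:PAPI_TOT_CYC;unit2:PAPI_L1_DCM;..."
   * units separated by ';', each unit name followed by its ':'-separated
   * counters (the counter names here assume the corresponding PAPI presets
   * exist on the host). */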
  if (process_count == 0){
    process_count = SIMIX_process_count();
    smpirun = 1;
  }
  smpi_universe_size = process_count;
  process_data = new smpi_process_data_t[process_count];
  for (i = 0; i < process_count; i++) {
    process_data[i] = new s_smpi_process_data_t;
    process_data[i]->argc = nullptr;
    process_data[i]->argv = nullptr;
    process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
    process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
    process_data[i]->mailboxes_mutex = xbt_mutex_init();
    process_data[i]->timer = xbt_os_timer_new();
    if (MC_is_active())
      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self = MPI_COMM_NULL;
    process_data[i]->comm_intra = MPI_COMM_NULL;
    process_data[i]->comm_world = nullptr;
    process_data[i]->state = SMPI_UNINITIALIZED;
    process_data[i]->sampling = 0;
    process_data[i]->finalization_barrier = nullptr;
    process_data[i]->return_value = 0;

    if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
      // TODO: Implement host/process/thread based counters. This implementation
      // just always takes the values passed via "default", like this:
      // "default:COUNTER1:COUNTER2:COUNTER3;".
      auto it = units2papi_setup.find(papi_default_config_name);
      if (it != units2papi_setup.end()) {
        process_data[i]->papi_event_set    = it->second.event_set;
        process_data[i]->papi_counter_data = it->second.counter_data;
        XBT_DEBUG("Setting PAPI set for process %i", i);
      } else {
        process_data[i]->papi_event_set = PAPI_NULL;
        XBT_DEBUG("No PAPI set for process %i", i);
      }
    }
  }
  //if the process was launched through smpirun script we generate a global mpi_comm_world
  //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
  if (smpirun) {
    group = new Group(process_count);
    MPI_COMM_WORLD = new Comm(group, nullptr);
    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
    msg_bar_t bar = MSG_barrier_init(process_count);

    for (i = 0; i < process_count; i++) {
      group->set_mapping(i, i);
      process_data[i]->finalization_barrier = bar;
    }
  }
}
void smpi_global_destroy()
{
  int count = smpi_process_count();

  smpi_bench_destroy();
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    delete MPI_COMM_WORLD->group();
    MSG_barrier_destroy(process_data[0]->finalization_barrier);
  }else{
    smpi_deployment_cleanup_instances();
  }
  for (int i = 0; i < count; i++) {
    if(process_data[i]->comm_self!=MPI_COMM_NULL){
      Comm::destroy(process_data[i]->comm_self);
    }
    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
      Comm::destroy(process_data[i]->comm_intra);
    }
    xbt_os_timer_free(process_data[i]->timer);
    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
    delete process_data[i];
  }
  delete[] process_data;
  process_data = nullptr;

  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    MPI_COMM_WORLD->cleanup_smp();
    MPI_COMM_WORLD->cleanup_attributes();
    if(smpi_coll_cleanup_callback!=nullptr)
      smpi_coll_cleanup_callback();
    delete MPI_COMM_WORLD;
  }

  MPI_COMM_WORLD = MPI_COMM_NULL;

  if (!MC_is_active()) {
    xbt_os_timer_free(global_timer);
  }

  xbt_free(index_to_process_data);
  if(smpi_comm_keyvals!=nullptr)
    xbt_dict_free(&smpi_comm_keyvals);
  if(smpi_privatize_global_variables)
    smpi_destroy_global_memory_segments();
  smpi_free_static();
}
void __attribute__ ((weak)) user_main_()
{
  xbt_die("Should not be in this smpi_simulated_main");
}

int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
  smpi_process_init(&argc, &argv);
  user_main_();
  return 0;
}
inline static int smpi_main_wrapper(int argc, char **argv){
  int ret = smpi_simulated_main_(argc, argv);
  if(ret != 0){
    XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
    smpi_process_data()->return_value = ret;
  }
  return 0;
}

int __attribute__ ((weak)) main(int argc, char **argv)
{
  return smpi_main(smpi_main_wrapper, argc, argv);
}
static void smpi_init_logs(){

  /* Connect log categories. See xbt/log.c */

  XBT_LOG_CONNECT(smpi);  /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
                             DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
  XBT_LOG_CONNECT(instr_smpi);
  XBT_LOG_CONNECT(smpi_base);
  XBT_LOG_CONNECT(smpi_bench);
  XBT_LOG_CONNECT(smpi_coll);
  XBT_LOG_CONNECT(smpi_colls);
  XBT_LOG_CONNECT(smpi_comm);
  XBT_LOG_CONNECT(smpi_datatype);
  XBT_LOG_CONNECT(smpi_dvfs);
  XBT_LOG_CONNECT(smpi_group);
  XBT_LOG_CONNECT(smpi_kernel);
  XBT_LOG_CONNECT(smpi_mpi);
  XBT_LOG_CONNECT(smpi_memory);
  XBT_LOG_CONNECT(smpi_op);
  XBT_LOG_CONNECT(smpi_pmpi);
  XBT_LOG_CONNECT(smpi_request);
  XBT_LOG_CONNECT(smpi_replay);
  XBT_LOG_CONNECT(smpi_rma);
  XBT_LOG_CONNECT(smpi_utils);
}
static void smpi_init_options(){
  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
  mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
      (mpi_coll_gather_description[gather_id].coll);

  int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                           xbt_cfg_get_string("smpi/allgather"),"allgather");
  mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgather_description[allgather_id].coll);

  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                            xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
  mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgatherv_description[allgatherv_id].coll);

  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                           xbt_cfg_get_string("smpi/allreduce"),"allreduce");
  mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_allreduce_description[allreduce_id].coll);

  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                          xbt_cfg_get_string("smpi/alltoall"),"alltoall");
  mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoall_description[alltoall_id].coll);

  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                           xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
  mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoallv_description[alltoallv_id].coll);

  int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
  mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
      (mpi_coll_bcast_description[bcast_id].coll);

  int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
  mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
      (mpi_coll_reduce_description[reduce_id].coll);

  int reduce_scatter_id =
      find_coll_description(mpi_coll_reduce_scatter_description,
                            xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
  mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);

  int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
  mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
      (mpi_coll_scatter_description[scatter_id].coll);

  int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
  mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
      (mpi_coll_barrier_description[barrier_id].coll);
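
  /* Each option above picks one registered implementation by name, e.g.
   *   --cfg=smpi/alltoall:pair
   * (assuming an algorithm is registered under that name);
   * find_coll_description() resolves the name and complains if it is
   * unknown. */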
  smpi_coll_cleanup_callback = nullptr;
  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
  smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
  if (smpi_cpu_threshold < 0)
    smpi_cpu_threshold = DBL_MAX;

  char* val = xbt_cfg_get_string("smpi/shared-malloc");
  if (!strcasecmp(val, "yes") || !strcmp(val, "1") || !strcasecmp(val, "on") || !strcasecmp(val, "global")) {
    smpi_cfg_shared_malloc = shmalloc_global;
  } else if (!strcasecmp(val, "local")) {
    smpi_cfg_shared_malloc = shmalloc_local;
  } else if (!strcasecmp(val, "no") || !strcmp(val, "0") || !strcasecmp(val, "off")) {
    smpi_cfg_shared_malloc = shmalloc_none;
  } else {
    xbt_die("Invalid value '%s' for option smpi/shared-malloc. Possible values: 'on' or 'global', 'local', 'off'",
            val);
  }
}
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
{
  srand(SMPI_RAND_SEED);

  if (getenv("SMPI_PRETEND_CC") != nullptr) {
    /* Hack to ensure that smpicc can pretend to be a simple compiler. Particularly handy to pass it to the
     * configuration tools */
    return 0;
  }
  smpi_init_logs();

  TRACE_global_init(&argc, argv);
  TRACE_add_start_function(TRACE_smpi_alloc);
  TRACE_add_end_function(TRACE_smpi_release);

  SIMIX_global_init(&argc, argv);
  MSG_init(&argc, argv);

  SMPI_switch_data_segment = &smpi_switch_data_segment;

  smpi_init_options();

  // parse the platform file: get the host list
  SIMIX_create_environment(argv[1]);
  SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
  SIMIX_function_register_default(realmain);
  SIMIX_launch_application(argv[2]);

  smpi_global_init();

  smpi_check_options();

  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

  /* Clean IO before the run */
  fflush(stdout);
  fflush(stderr);

  if (MC_is_active()) {
    MC_run();
  } else {
    SIMIX_run();

    xbt_os_walltimer_stop(global_timer);
    if (xbt_cfg_get_boolean("smpi/display-timing")){
      double global_time = xbt_os_timer_elapsed(global_timer);
      XBT_INFO("Simulated time: %g seconds. \n\n"
               "The simulation took %g seconds (after parsing and platform setup)\n"
               "%g seconds were actual computation of the application",
               SIMIX_get_clock(), global_time, smpi_total_benched_time);

      if (smpi_total_benched_time/global_time >= 0.75)
        XBT_INFO("More than 75%% of the time was spent inside the application code.\n"
                 "You may want to use sampling functions or trace replay to reduce this.");
    }
  }
  int count = smpi_process_count();
  int i, ret = 0;
  for (i = 0; i < count; i++) {
    if(process_data[i]->return_value != 0){
      ret = process_data[i]->return_value; //return first non 0 value
      break;
    }
  }
  smpi_global_destroy();

  TRACE_end();

  return ret;
}
// This function can be called from an external file, to initialize logs, options, and processes of smpi
// without the need of smpirun
void SMPI_init(){
  smpi_init_logs();
  smpi_init_options();
  smpi_global_init();
  smpi_check_options();
  if (TRACE_is_enabled() && TRACE_is_configured())
    TRACE_smpi_alloc();
  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();
}
void SMPI_finalize(){
  smpi_global_destroy();
}
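
/* Hypothetical use of SMPI_init()/SMPI_finalize() from an external program,
 * i.e. without smpirun (sketch only):
 *
 *   MSG_init(&argc, argv);
 *   MSG_create_environment(platform_file);
 *   // register functions and launch the MPI actors from a deployment file
 *   SMPI_init();
 *   MSG_main();
 *   SMPI_finalize();
 */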