/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/sg_config.h"
#include "smpi_mpi_dt_private.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "xbt/replay.h"

#include <float.h> /* DBL_MAX */

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");

#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
const char* papi_default_config_name = "default";

struct papi_process_data {
  papi_counter_t counter_data;

std::unordered_map<std::string, double> location2speedup;

typedef struct s_smpi_process_data {
  simgrid::s4u::MailboxPtr mailbox;
  simgrid::s4u::MailboxPtr mailbox_small;
  xbt_mutex_t mailboxes_mutex;
  void *data; /* user data */
  int sampling; /* inside an SMPI_SAMPLE_ block? */
  bool replaying; /* is the process replaying a trace */
  msg_bar_t finalization_barrier;
  smpi_trace_call_location_t trace_call_loc;
  /** Contains hardware data as read by PAPI **/
  papi_counter_t papi_counter_data;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
extern xbt_dict_t smpi_type_keyvals;
extern xbt_dict_t smpi_comm_keyvals;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;

#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
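/* Mailbox names are a 5-character prefix ("SMPI-" or "small") followed by the index printed as a
 * fixed-width hexadecimal number (2 hex digits per byte of an int), plus the terminating NUL byte:
 * hence the 5 + sizeof(int) * 2 + 1 above. */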
static char *get_mailbox_name(char *str, int index)
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), index);

static char *get_mailbox_name_small(char *str, int index)
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), index);
void smpi_process_init(int *argc, char ***argv)
  if (process_data == nullptr){
    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check the compilation process and use smpirun.\n");
  if (argc != nullptr && argv != nullptr) {
    smx_actor_t proc = SIMIX_process_self();
    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    int index = smpi_process_index_of_smx_process(proc);
    if(index_to_process_data == nullptr){
      index_to_process_data = static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
    if(smpi_privatize_global_variables){
      /* Now using the segment index of the process */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
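      /* With privatized global variables, every rank owns its own copy of the executable's data
       * segment; from here on the index is the segment index, and the current rank's copy is the
       * one that gets mapped. */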
    MPI_Comm* temp_comm_world;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    smpi_process_data_t data = smpi_process_remote_data(index);
    data->comm_world = temp_comm_world;
    if(temp_bar != nullptr)
      data->finalization_barrier = temp_bar;
    data->instance_id = instance_id;
    data->replaying = false;
    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
    simdata->data = data;
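    /* The instance id and the rank were consumed from argv above: shift the remaining arguments
     * down by two slots so that the application sees a plain argv. */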
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
    // set the process attached to the mailbox
    data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  xbt_assert(smpi_process_data(),
      "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
      "Although it's required by MPI-2, this is currently not supported by SMPI.");
void smpi_process_destroy()
  int index = smpi_process_index();
  if(smpi_privatize_global_variables){
    smpi_switch_data_segment(index);
  process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
  XBT_DEBUG("<%d> Process left the game", index);

/** @brief Prepares the current process for termination. */
void smpi_process_finalize()
  // This leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
  int index = smpi_process_index();
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
/** @brief Check if a process is finalized */
int smpi_process_finalized()
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);

/** @brief Check if a process is initialized */
int smpi_process_initialized()
  if (index_to_process_data == nullptr){
  int index = smpi_process_index();
  return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));

/** @brief Mark a process as initialized (=MPI_Init called) */
void smpi_process_mark_as_initialized()
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;

void smpi_process_set_replaying(bool value){
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->replaying = value;

bool smpi_process_get_replaying(){
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return process_data[index_to_process_data[index]]->replaying;
  else return (_xbt_replay_is_active() != 0);

int smpi_global_size()
  char *value = getenv("SMPI_GLOBAL_SIZE");
  xbt_assert(value, "Please set env var SMPI_GLOBAL_SIZE to the expected number of processes.");
  return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s");
smpi_process_data_t smpi_process_data()
  simdata_process_t simdata = static_cast<simdata_process_t>(SIMIX_process_self_get_data());
  return static_cast<smpi_process_data_t>(simdata->data);

smpi_process_data_t smpi_process_remote_data(int index)
  return process_data[index_to_process_data[index]];

void smpi_process_set_user_data(void *data)
  smpi_process_data_t process_data = smpi_process_data();
  process_data->data = data;

void *smpi_process_get_user_data()
  smpi_process_data_t process_data = smpi_process_data();
  return process_data->data;

int smpi_process_count()
  return process_count;

 * \brief Returns a structure that stores the location (filename + linenumber)
 * of the last calls to MPI_* functions.
 * \see smpi_trace_set_call_location
smpi_trace_call_location_t* smpi_process_get_call_location()
  smpi_process_data_t process_data = smpi_process_data();
  return &process_data->trace_call_loc;
int smpi_process_index()
  smpi_process_data_t data = smpi_process_data();
  // return MPI_UNDEFINED if not initialized
  return data != nullptr ? data->index : MPI_UNDEFINED;

MPI_Comm smpi_process_comm_world()
  smpi_process_data_t data = smpi_process_data();
  // return MPI_COMM_NULL if not initialized
  return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
smx_mailbox_t smpi_process_mailbox()
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox->getImpl();

smx_mailbox_t smpi_process_mailbox_small()
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox_small->getImpl();

xbt_mutex_t smpi_process_mailboxes_mutex()
  smpi_process_data_t data = smpi_process_data();
  return data->mailboxes_mutex;

smx_mailbox_t smpi_process_remote_mailbox(int index)
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox->getImpl();

smx_mailbox_t smpi_process_remote_mailbox_small(int index)
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox_small->getImpl();

xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailboxes_mutex;

int smpi_process_papi_event_set(void)
  smpi_process_data_t data = smpi_process_data();
  return data->papi_event_set;

papi_counter_t& smpi_process_papi_counters(void)
  smpi_process_data_t data = smpi_process_data();
  return data->papi_counter_data;

xbt_os_timer_t smpi_process_timer()
  smpi_process_data_t data = smpi_process_data();

void smpi_process_simulated_start()
  smpi_process_data_t data = smpi_process_data();
  data->simulated = SIMIX_get_clock();

double smpi_process_simulated_elapsed()
  smpi_process_data_t data = smpi_process_data();
  return SIMIX_get_clock() - data->simulated;

MPI_Comm smpi_process_comm_self()
  smpi_process_data_t data = smpi_process_data();
  if(data->comm_self==MPI_COMM_NULL){
    MPI_Group group = smpi_group_new(1);
    data->comm_self = smpi_comm_new(group, nullptr);
    smpi_group_set_mapping(group, smpi_process_index(), 0);
  return data->comm_self;

MPI_Comm smpi_process_get_comm_intra()
  smpi_process_data_t data = smpi_process_data();
  return data->comm_intra;

void smpi_process_set_comm_intra(MPI_Comm comm)
  smpi_process_data_t data = smpi_process_data();
  data->comm_intra = comm;

void smpi_process_set_sampling(int s)
  smpi_process_data_t data = smpi_process_data();

int smpi_process_get_sampling()
  smpi_process_data_t data = smpi_process_data();
  return data->sampling;

void print_request(const char *message, MPI_Request request)
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
  XBT_DEBUG("Copy the data over");

  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);

  if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
    XBT_DEBUG("Privatization: we are copying from a zone inside the global memory. Saving data to a temporary buffer!");
    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
    memcpy(tmpbuff, buff, buff_size);

  if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization: we are copying to a zone inside the global memory - switching data segment");
    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));

  memcpy(comm->dst_buff, tmpbuff, buff_size);

  if (comm->detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    // It seems that the request is still used after this call, so it should be freed somewhere
    // else, but where???
    // xbt_free(comm->comm.src_data); // inside SMPI the request is kept in the user data and should be freed there
    comm->src_buff = nullptr;

  if (tmpbuff != buff)
    xbt_free(tmpbuff);
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
  /* nothing done in this version */

static void smpi_check_options(){
  //check correctness of MPI parameters
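  /* Small messages (below 'smpi/async-small-thresh') go through the asynchronous "small" mailboxes
   * and are sent in detached mode, which is presumably why this threshold must not exceed
   * 'smpi/send-is-detached-thresh'. */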
  xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));

  if (xbt_cfg_is_default_value("smpi/host-speed")) {
    XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
             "Use the option \"--cfg=smpi/host-speed:<flops>\" to set its value. "
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");

  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >= 0,
       "The 'smpi/cpu-threshold' option cannot have negative values [anymore]. If you want to discard "
       "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");

  return process_data != nullptr;
void smpi_global_init()
  char name[MAILBOX_NAME_MAXLEN];

  if (!MC_is_active()) {
    global_timer = xbt_os_timer_new();
    xbt_os_walltimer_start(global_timer);

  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
    std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
    std::ifstream fstream(filename);
    if (!fstream.is_open()) {
      xbt_die("Could not open file %s. Does it exist?", filename.c_str());
    typedef boost::tokenizer<boost::escaped_list_separator<char>> Tokenizer;
    std::getline(fstream, line); // Skip the header line
    while (std::getline(fstream, line)) {
      Tokenizer::iterator it = tok.begin();
      Tokenizer::iterator end = std::next(tok.begin());

      std::string location = *it;
      boost::trim(location);
      location2speedup.insert(std::pair<std::string, double>(location, std::stod(*end)));
  // This map holds for each computation unit (such as "default" or "process1" etc.)
  // the configuration as given by the user (counter data as a pair of (counter_name, counter_value))
  // and the (computed) event_set.
  std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;

  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
      XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
                " Expected version is %i",

    typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
    boost::char_separator<char> separator_units(";");
    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
    Tokenizer tokens(str, separator_units);
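    // The option value is assumed to look like "default:PAPI_L1_DCM:PAPI_TOT_INS;other_unit:...":
    // units are separated by ';', and within a unit the unit name and its counters are separated
    // by ':' (see also the "default:COUNTER1:COUNTER2:COUNTER3;" example further below).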
    // Iterate over all the computational units. This could be
    // processes, hosts, threads, ranks... You name it. I'm not exactly
    // sure what we will support eventually, so I'll leave it at the
    // general term "units".
    for (auto& unit_it : tokens) {
      boost::char_separator<char> separator_events(":");
      Tokenizer event_tokens(unit_it, separator_events);

      int event_set = PAPI_NULL;
      if (PAPI_create_eventset(&event_set) != PAPI_OK) {
        // TODO: Should this let the whole simulation die?
        XBT_CRITICAL("Could not create PAPI event set during init.");

      // NOTE: We cannot use a map here, as we must obey the order of the counters
      // This is important for PAPI: We need to map the values of counters back
      // to the event_names (so, when PAPI_read() has finished)!
      papi_counter_t counters2values;
      // Iterate over all counters that were specified for this specific unit.
      // Note that we need to remove the name of the unit
      // (that could also be the "default" value), which always comes first.
      // Hence, we start at ++(events.begin())!
      for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); events_it++) {
        int event_code = PAPI_NULL;
        char* event_name = const_cast<char*>((*events_it).c_str());
        if (PAPI_event_name_to_code(event_name, &event_code) == PAPI_OK) {
          if (PAPI_add_event(event_set, event_code) != PAPI_OK) {
            XBT_ERROR("Could not add PAPI event '%s'. Skipping.", event_name);
          XBT_DEBUG("Successfully added PAPI event '%s' to the event set.", event_name);
          XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);

        counters2values.push_back(
            // We cannot just pass *events_it, as this is of type const basic_string
            std::make_pair<std::string, long long>(std::string(*events_it), 0));

      std::string unit_name = *(event_tokens.begin());
      papi_process_data config = {.counter_data = std::move(counters2values), .event_set = event_set};

      units2papi_setup.insert(std::make_pair(unit_name, std::move(config)));
  if (process_count == 0){
    process_count = SIMIX_process_count();

  smpi_universe_size = process_count;
  process_data = new smpi_process_data_t[process_count];
  for (i = 0; i < process_count; i++) {
    process_data[i] = new s_smpi_process_data_t;
    process_data[i]->argc = nullptr;
    process_data[i]->argv = nullptr;
    process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
    process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
    process_data[i]->mailboxes_mutex = xbt_mutex_init();
    process_data[i]->timer = xbt_os_timer_new();
      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self = MPI_COMM_NULL;
    process_data[i]->comm_intra = MPI_COMM_NULL;
    process_data[i]->comm_world = nullptr;
    process_data[i]->state = SMPI_UNINITIALIZED;
    process_data[i]->sampling = 0;
    process_data[i]->finalization_barrier = nullptr;
    process_data[i]->return_value = 0;

    if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
      // TODO: Implement host/process/thread based counters. This implementation
      // just always takes the values passed via "default", like this:
      // "default:COUNTER1:COUNTER2:COUNTER3;".
      auto it = units2papi_setup.find(papi_default_config_name);
      if (it != units2papi_setup.end()) {
        process_data[i]->papi_event_set = it->second.event_set;
        process_data[i]->papi_counter_data = it->second.counter_data;
        XBT_DEBUG("Setting PAPI set for process %i", i);
        process_data[i]->papi_event_set = PAPI_NULL;
        XBT_DEBUG("No PAPI set for process %i", i);
  // If the process was launched through the smpirun script, we generate a global MPI_COMM_WORLD;
  // if not, we leave it as MPI_COMM_NULL and the comm world will be private to each MPI instance.
    group = smpi_group_new(process_count);
    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
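    /* A single barrier, sized for all processes, is shared as everyone's finalization barrier:
     * smpi_process_finalize() waits on it so that no rank leaves before the pending asynchronous
     * communications are over. */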
    msg_bar_t bar = MSG_barrier_init(process_count);
    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
      process_data[i]->finalization_barrier = bar;
void smpi_global_destroy()
  int count = smpi_process_count();

  smpi_bench_destroy();
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
    MSG_barrier_destroy(process_data[0]->finalization_barrier);

  smpi_deployment_cleanup_instances();

  for (int i = 0; i < count; i++) {
    if(process_data[i]->comm_self!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_self);
    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_intra);
    xbt_os_timer_free(process_data[i]->timer);
    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
    delete process_data[i];

  delete[] process_data;
  process_data = nullptr;

  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
    if(smpi_coll_cleanup_callback!=nullptr)
      smpi_coll_cleanup_callback();
    xbt_free(MPI_COMM_WORLD);

  MPI_COMM_WORLD = MPI_COMM_NULL;

  if (!MC_is_active()) {
    xbt_os_timer_free(global_timer);

  xbt_free(index_to_process_data);
  if(smpi_type_keyvals!=nullptr)
    xbt_dict_free(&smpi_type_keyvals);
  if(smpi_comm_keyvals!=nullptr)
    xbt_dict_free(&smpi_comm_keyvals);
  if(smpi_privatize_global_variables)
    smpi_destroy_global_memory_segments();
void __attribute__ ((weak)) user_main_()
  xbt_die("Should not be in this smpi_simulated_main");

int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
  smpi_process_init(&argc, &argv);

inline static int smpi_main_wrapper(int argc, char **argv){
  int ret = smpi_simulated_main_(argc, argv);
    XBT_WARN("SMPI process did not return 0. Return value: %d", ret);
    smpi_process_data()->return_value = ret;

int __attribute__ ((weak)) main(int argc, char **argv)
  return smpi_main(smpi_main_wrapper, argc, argv);
static void smpi_init_logs(){
  /* Connect log categories. See xbt/log.c */

  XBT_LOG_CONNECT(smpi);  /* Keep this line as early as possible in this function:
                             xbt_log_appender_file.c depends on it. DO NOT connect this in XBT
                             or elsewhere, or it will be useless to xbt_log_appender_file.c */

  XBT_LOG_CONNECT(instr_smpi);
  XBT_LOG_CONNECT(smpi_base);
  XBT_LOG_CONNECT(smpi_bench);
  XBT_LOG_CONNECT(smpi_coll);
  XBT_LOG_CONNECT(smpi_colls);
  XBT_LOG_CONNECT(smpi_comm);
  XBT_LOG_CONNECT(smpi_dvfs);
  XBT_LOG_CONNECT(smpi_group);
  XBT_LOG_CONNECT(smpi_kernel);
  XBT_LOG_CONNECT(smpi_mpi);
  XBT_LOG_CONNECT(smpi_mpi_dt);
  XBT_LOG_CONNECT(smpi_pmpi);
  XBT_LOG_CONNECT(smpi_replay);
  XBT_LOG_CONNECT(smpi_rma);

static void smpi_init_options(){
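  /* For each collective, look up the algorithm selected by the matching "smpi/<collective>"
   * configuration option in its description table, and store the chosen implementation behind an
   * MPI-like function pointer. */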
  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"), "gather");
  mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
      (mpi_coll_gather_description[gather_id].coll);

  int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                           xbt_cfg_get_string("smpi/allgather"), "allgather");
  mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgather_description[allgather_id].coll);

  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                            xbt_cfg_get_string("smpi/allgatherv"), "allgatherv");
  mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgatherv_description[allgatherv_id].coll);

  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                           xbt_cfg_get_string("smpi/allreduce"), "allreduce");
  mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_allreduce_description[allreduce_id].coll);

  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                          xbt_cfg_get_string("smpi/alltoall"), "alltoall");
  mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoall_description[alltoall_id].coll);

  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                           xbt_cfg_get_string("smpi/alltoallv"), "alltoallv");
  mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoallv_description[alltoallv_id].coll);

  int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"), "bcast");
  mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
      (mpi_coll_bcast_description[bcast_id].coll);

  int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"), "reduce");
  mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
      (mpi_coll_reduce_description[reduce_id].coll);

  int reduce_scatter_id = find_coll_description(mpi_coll_reduce_scatter_description,
                                                xbt_cfg_get_string("smpi/reduce-scatter"), "reduce_scatter");
  mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);

  int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"), "scatter");
  mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
      (mpi_coll_scatter_description[scatter_id].coll);

  int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"), "barrier");
  mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
      (mpi_coll_barrier_description[barrier_id].coll);
  smpi_coll_cleanup_callback = nullptr;
  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
  smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
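  /* A negative threshold historically meant "do not simulate any computation"; mapping it to
   * DBL_MAX keeps that behavior, although 'smpi/simulate-computation:no' is now the recommended
   * way to request it (see smpi_check_options()). */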
  if (smpi_cpu_threshold < 0)
    smpi_cpu_threshold = DBL_MAX;
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
  srand(SMPI_RAND_SEED);

  if (getenv("SMPI_PRETEND_CC") != nullptr) {
    /* Hack to ensure that smpicc can pretend to be a simple compiler. Particularly handy to pass it to the
     * configuration tools */

  TRACE_global_init(&argc, argv);
  TRACE_add_start_function(TRACE_smpi_alloc);
  TRACE_add_end_function(TRACE_smpi_release);

  SIMIX_global_init(&argc, argv);
  MSG_init(&argc, argv);

  SMPI_switch_data_segment = &smpi_switch_data_segment;

  // parse the platform file: get the host list
  SIMIX_create_environment(argv[1]);
  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
  SIMIX_function_register_default(realmain);
  SIMIX_launch_application(argv[2]);

  smpi_check_options();

  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

  /* Clean IO before the run */

  if (MC_is_active()) {

  xbt_os_walltimer_stop(global_timer);
  if (xbt_cfg_get_boolean("smpi/display-timing")){
    double global_time = xbt_os_timer_elapsed(global_timer);
    XBT_INFO("Simulated time: %g seconds. \n\n"
             "The simulation took %g seconds (after parsing and platform setup)\n"
             "%g seconds were actual computation of the application",
             SIMIX_get_clock(), global_time, smpi_total_benched_time);

    if (smpi_total_benched_time/global_time >= 0.75)
      XBT_INFO("More than 75%% of the time was spent inside the application code.\n"
               "You may want to use sampling functions or trace replay to reduce this.");
  int count = smpi_process_count();

  for (i = 0; i < count; i++) {
    if(process_data[i]->return_value!=0){
      ret = process_data[i]->return_value; // return the first non-zero value

  smpi_global_destroy();
// This function can be called from an external file to initialize the logs, the options, and the
// SMPI processes, without needing smpirun.
  smpi_check_options();
  if (TRACE_is_enabled() && TRACE_is_configured())

  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

void SMPI_finalize(){
  smpi_global_destroy();