/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/sg_config.h"
#include "smpi_mpi_dt_private.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "xbt/replay.h"

#include <float.h> /* DBL_MAX */
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
const char* papi_default_config_name = "default";

struct papi_process_data {
  papi_counter_t counter_data;
  int event_set;
};

std::unordered_map<std::string, double> location2speedup;
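
/* Maps a source location, as recorded by the SMPI instrumentation, to a
 * speedup factor. It is filled from the "smpi/comp-adjustment-file" option,
 * parsed in smpi_global_init() below. */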
typedef struct s_smpi_process_data {
  double simulated;
  int *argc;
  char ***argv;
  simgrid::s4u::MailboxPtr mailbox;
  simgrid::s4u::MailboxPtr mailbox_small;
  xbt_mutex_t mailboxes_mutex;
  xbt_os_timer_t timer;
  MPI_Comm comm_self;
  MPI_Comm comm_intra;
  MPI_Comm* comm_world;
  void *data; /* user data */
  int index;
  char state; /* SMPI_UNINITIALIZED, SMPI_INITIALIZED or SMPI_FINALIZED */
  int sampling; /* inside an SMPI_SAMPLE_ block? */
  char* instance_id;
  bool replaying; /* is the process replaying a trace */
  msg_bar_t finalization_barrier;
  int return_value;
  smpi_trace_call_location_t trace_call_loc;
  /** Contains hardware data as read by PAPI **/
  int papi_event_set;
  papi_counter_t papi_counter_data;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
extern xbt_dict_t smpi_type_keyvals;
extern xbt_dict_t smpi_comm_keyvals;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;

void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)

static char *get_mailbox_name(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int> (sizeof(int) * 2), index);
  return str;
}

static char *get_mailbox_name_small(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int> (sizeof(int) * 2), index);
  return str;
}
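
/* For a 4-byte int, MAILBOX_NAME_MAXLEN is 5 + 8 + 1 = 14 bytes: a 5-char
 * prefix ("SMPI-" or "small"), sizeof(int) * 2 = 8 hex digits, and the
 * terminating NUL. Example: index 42 yields "SMPI-0000002a" and
 * "small0000002a". */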
void smpi_process_init(int *argc, char ***argv)
{
  if (process_data == nullptr){
    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check the compilation process and use smpirun\n");
    exit(1);
  }
  if (argc != nullptr && argv != nullptr) {
    smx_actor_t proc = SIMIX_process_self();
    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    int index = smpi_process_index_of_smx_process(proc);

    if(index_to_process_data == nullptr){
      index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
    }

    if(smpi_privatize_global_variables){
      /* Now using the segment index of the process */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
    }

    MPI_Comm* temp_comm_world;
    msg_bar_t temp_bar;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    smpi_process_data_t data = smpi_process_remote_data(index);
    data->comm_world = temp_comm_world;
    if(temp_bar != nullptr)
      data->finalization_barrier = temp_bar;
    data->index = index;
    data->instance_id = instance_id;
    data->replaying = false;

    static_cast<simgrid::MsgActorExt*>(proc->data)->data = data;

    /* Strip the instance id and rank from the command line before handing it to the application */
    if (*argc > 3) {
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
      (*argv)[(*argc) - 1] = nullptr;
      (*argv)[(*argc) - 2] = nullptr;
    }
    (*argc) -= 2;
    data->argc = argc;
    data->argv = argv;
    // set the process attached to the mailbox
    data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  }
  xbt_assert(smpi_process_data(),
             "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
             "Although it's required by MPI-2, this is currently not supported by SMPI.");
}
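
/* Sketch of the command line this function expects (an assumption drawn from
 * the argument parsing above): the deployment generated by smpirun passes
 *   argv = { "<executable>", "<instance_id>", "<rank>", <user args...> }
 * and the shifting code strips the two SMPI-specific arguments so that the
 * application only ever sees its own. */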
void smpi_process_destroy()
{
  int index = smpi_process_index();
  if(smpi_privatize_global_variables){
    smpi_switch_data_segment(index);
  }
  process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
  XBT_DEBUG("<%d> Process left the game", index);
}
/** @brief Prepares the current process for termination. */
void smpi_process_finalize()
{
  // Under model checking, waiting on this barrier leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
    return;

  int index = smpi_process_index();
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
}
/** @brief Check if a process is finalized */
int smpi_process_finalized()
{
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);
  else
    return 0;
}
/** @brief Check if a process is initialized */
int smpi_process_initialized()
{
  if (index_to_process_data == nullptr){
    return false;
  }
  int index = smpi_process_index();
  return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));
}
/** @brief Mark a process as initialized (=MPI_Init called) */
void smpi_process_mark_as_initialized()
{
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;
}
void smpi_process_set_replaying(bool value){
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->replaying = value;
}

bool smpi_process_get_replaying(){
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return process_data[index_to_process_data[index]]->replaying;
  else
    return (_xbt_replay_is_active() != 0);
}
int smpi_global_size()
{
  char *value = getenv("SMPI_GLOBAL_SIZE");
  xbt_assert(value, "Please set the SMPI_GLOBAL_SIZE environment variable to the expected number of processes.");

  return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s");
}
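
/* Hypothetical usage of the environment variable checked above, e.g. when
 * running a replay tool outside of smpirun:
 *   SMPI_GLOBAL_SIZE=32 ./my_tool ...
 * (only the variable name and its meaning come from the code; the command is
 * illustrative). */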
smpi_process_data_t smpi_process_data()
{
  simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
  return static_cast<smpi_process_data_t>(msgExt->data);
}

smpi_process_data_t smpi_process_remote_data(int index)
{
  return process_data[index_to_process_data[index]];
}

void smpi_process_set_user_data(void *data)
{
  smpi_process_data_t process_data = smpi_process_data();
  process_data->data = data;
}

void *smpi_process_get_user_data()
{
  smpi_process_data_t process_data = smpi_process_data();
  return process_data->data;
}

int smpi_process_count()
{
  return process_count;
}
/**
 * \brief Returns a structure that stores the location (filename + line number)
 *        of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* smpi_process_get_call_location()
{
  smpi_process_data_t process_data = smpi_process_data();
  return &process_data->trace_call_loc;
}
int smpi_process_index()
{
  smpi_process_data_t data = smpi_process_data();
  // return MPI_UNDEFINED if not initialized
  return data != nullptr ? data->index : MPI_UNDEFINED;
}

MPI_Comm smpi_process_comm_world()
{
  smpi_process_data_t data = smpi_process_data();
  // return MPI_COMM_NULL if not initialized
  return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
}

smx_mailbox_t smpi_process_mailbox()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox->getImpl();
}

smx_mailbox_t smpi_process_mailbox_small()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox_small->getImpl();
}

xbt_mutex_t smpi_process_mailboxes_mutex()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailboxes_mutex;
}

smx_mailbox_t smpi_process_remote_mailbox(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox->getImpl();
}

smx_mailbox_t smpi_process_remote_mailbox_small(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox_small->getImpl();
}

xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailboxes_mutex;
}
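
/* Note on the two-mailbox scheme exposed above: each rank owns a regular
 * mailbox and a "small" one. Given the "smpi/async-small-thresh" option
 * checked in smpi_check_options() below, messages under that threshold are
 * presumably routed through mailbox_small so that small asynchronous sends
 * can complete eagerly; the actual routing happens elsewhere in SMPI. */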
int smpi_process_papi_event_set(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_event_set;
}

papi_counter_t& smpi_process_papi_counters(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_counter_data;
}

xbt_os_timer_t smpi_process_timer()
{
  smpi_process_data_t data = smpi_process_data();
  return data->timer;
}

void smpi_process_simulated_start()
{
  smpi_process_data_t data = smpi_process_data();
  data->simulated = SIMIX_get_clock();
}

double smpi_process_simulated_elapsed()
{
  smpi_process_data_t data = smpi_process_data();
  return SIMIX_get_clock() - data->simulated;
}
MPI_Comm smpi_process_comm_self()
{
  smpi_process_data_t data = smpi_process_data();
  if(data->comm_self==MPI_COMM_NULL){
    MPI_Group group = smpi_group_new(1);
    data->comm_self = smpi_comm_new(group, nullptr);
    smpi_group_set_mapping(group, smpi_process_index(), 0);
  }
  return data->comm_self;
}

MPI_Comm smpi_process_get_comm_intra()
{
  smpi_process_data_t data = smpi_process_data();
  return data->comm_intra;
}

void smpi_process_set_comm_intra(MPI_Comm comm)
{
  smpi_process_data_t data = smpi_process_data();
  data->comm_intra = comm;
}

void smpi_process_set_sampling(int s)
{
  smpi_process_data_t data = smpi_process_data();
  data->sampling = s;
}

int smpi_process_get_sampling()
{
  smpi_process_data_t data = smpi_process_data();
  return data->sampling;
}
void print_request(const char *message, MPI_Request request)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
}

void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
{
  smpi_comm_copy_data_callback = callback;
}
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff = buff;
  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);

  if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization: we are copying from a zone inside global memory... Saving the data to a temporary buffer!");

    smpi_switch_data_segment(
        (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index));
    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
    memcpy(tmpbuff, buff, buff_size);
  }

  if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization: we are copying to a zone inside global memory - switching the data segment");
    smpi_switch_data_segment(
        (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index));
  }

  memcpy(comm->dst_buff, tmpbuff, buff_size);
  if (comm->detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    // It seems that the request is still used after this call, so it should be freed somewhere else, but where?
    // xbt_free(comm->comm.src_data); // inside SMPI the request is kept within the user data and should be freed there
    comm->src_buff = nullptr;
  }

  if (tmpbuff != buff)
    xbt_free(tmpbuff);
}

void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
{
  /* nothing done in this version */
}
static void smpi_check_options(){
  // check the correctness of the MPI parameters

  xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));

  if (xbt_cfg_is_default_value("smpi/host-speed")) {
    XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
             "Use the option \"--cfg=smpi/host-speed:<flops>\" to set its value. "
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
  }

  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >= 0,
             "The 'smpi/cpu-threshold' option cannot have negative values [anymore]. If you want to discard "
             "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");
}

int smpi_enabled() {
  return process_data != nullptr;
}
void smpi_global_init()
{
  int i;
  MPI_Group group;
  char name[MAILBOX_NAME_MAXLEN];
  int smpirun = 0;

  if (!MC_is_active()) {
    global_timer = xbt_os_timer_new();
    xbt_os_walltimer_start(global_timer);
  }
  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
    std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
    std::ifstream fstream(filename);
    if (!fstream.is_open()) {
      xbt_die("Could not open file %s. Does it exist?", filename.c_str());
    }

    std::string line;
    typedef boost::tokenizer<boost::escaped_list_separator<char>> Tokenizer;
    std::getline(fstream, line); // Skip the header line
    while (std::getline(fstream, line)) {
      Tokenizer tok(line);
      Tokenizer::iterator it  = tok.begin();
      Tokenizer::iterator end = std::next(tok.begin());

      std::string location = *it;
      boost::trim(location);
      location2speedup.insert(std::pair<std::string, double>(location, std::stod(*end)));
    }
  }
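
  /* A minimal sketch of the expected input, assuming (from the parsing above)
     a CSV file whose first line is a skipped header, with the call location in
     column 1 and the speedup factor (parsed as a double) in column 2:

       MPI call location,speedup
       mm_dgemm.c:42,1.5
       solve.c:1337,0.8
  */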
  // This map holds for each computation unit (such as "default" or "process1" etc.)
  // the configuration as given by the user (counter data as a pair of (counter_name, counter_value))
  // and the (computed) event_set.
  std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;

  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
      XBT_ERROR("Could not initialize the PAPI library; is it correctly installed and linked?"
                " Expected version is %i", PAPI_VER_CURRENT);

    typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
    boost::char_separator<char> separator_units(";");
    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
    Tokenizer tokens(str, separator_units);
    // Iterate over all the computational units. This could be
    // processes, hosts, threads, ranks... You name it. I'm not exactly
    // sure what we will support eventually, so I'll leave it at the
    // general term "units".
    for (auto& unit_it : tokens) {
      boost::char_separator<char> separator_events(":");
      Tokenizer event_tokens(unit_it, separator_events);

      int event_set = PAPI_NULL;
      if (PAPI_create_eventset(&event_set) != PAPI_OK) {
        // TODO: Should this let the whole simulation die?
        XBT_CRITICAL("Could not create a PAPI event set during init.");
      }

      // NOTE: We cannot use a map here, as we must obey the order of the counters.
      // This is important for PAPI: we need to map the values of the counters back
      // to the event names once PAPI_read() has finished!
      papi_counter_t counters2values;

      // Iterate over all counters that were specified for this specific unit.
      // Note that we need to remove the name of the unit
      // (that could also be the "default" value), which always comes first.
      // Hence, we start at ++(event_tokens.begin())!
      for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); events_it++) {
        int event_code = PAPI_NULL;
        char* event_name = const_cast<char*>((*events_it).c_str());
        if (PAPI_event_name_to_code(event_name, &event_code) == PAPI_OK) {
          if (PAPI_add_event(event_set, event_code) != PAPI_OK) {
            XBT_ERROR("Could not add PAPI event '%s'. Skipping.", event_name);
            continue;
          } else {
            XBT_DEBUG("Successfully added PAPI event '%s' to the event set.", event_name);
          }
        } else {
          XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);
          continue;
        }

        counters2values.push_back(
            // We cannot just pass *events_it, as this is of type const basic_string
            std::make_pair<std::string, long long>(std::string(*events_it), 0));
      }

      std::string unit_name = *(event_tokens.begin());
      papi_process_data config = {.counter_data = std::move(counters2values), .event_set = event_set};

      units2papi_setup.insert(std::make_pair(unit_name, std::move(config)));
    }
  }
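
  /* Example value for "smpi/papi-events", with the format inferred from the
   * tokenizers above (units separated by ';', and within a unit the unit name
   * first, then ':'-separated counter names):
   *   --cfg=smpi/papi-events:"default:PAPI_TOT_INS:PAPI_L2_TCM"
   * PAPI_TOT_INS and PAPI_L2_TCM are standard PAPI preset counters, used here
   * only for illustration. */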
  if (process_count == 0){
    process_count = SIMIX_process_count();
    smpirun = 1;
  }
  smpi_universe_size = process_count;
  process_data = new smpi_process_data_t[process_count];
  for (i = 0; i < process_count; i++) {
    process_data[i] = new s_smpi_process_data_t;
    process_data[i]->argc = nullptr;
    process_data[i]->argv = nullptr;
    process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
    process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
    process_data[i]->mailboxes_mutex = xbt_mutex_init();
    process_data[i]->timer = xbt_os_timer_new();
    if (MC_is_active())
      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self = MPI_COMM_NULL;
    process_data[i]->comm_intra = MPI_COMM_NULL;
    process_data[i]->comm_world = nullptr;
    process_data[i]->state = SMPI_UNINITIALIZED;
    process_data[i]->sampling = 0;
    process_data[i]->finalization_barrier = nullptr;
    process_data[i]->return_value = 0;

    if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
      // TODO: Implement host/process/thread based counters. This implementation
      // just always takes the values passed via "default", like this:
      // "default:COUNTER1:COUNTER2:COUNTER3;".
      auto it = units2papi_setup.find(papi_default_config_name);
      if (it != units2papi_setup.end()) {
        process_data[i]->papi_event_set = it->second.event_set;
        process_data[i]->papi_counter_data = it->second.counter_data;
        XBT_DEBUG("Setting PAPI set for process %i", i);
      } else {
        process_data[i]->papi_event_set = PAPI_NULL;
        XBT_DEBUG("No PAPI set for process %i", i);
      }
    }
  }
  // if the process was launched through the smpirun script we generate a global MPI_COMM_WORLD;
  // if not, we leave it as MPI_COMM_NULL, and the comm world will be private to each MPI instance
  if (smpirun) {
    group = smpi_group_new(process_count);
    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
    msg_bar_t bar = MSG_barrier_init(process_count);

    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
      process_data[i]->finalization_barrier = bar;
    }
  }
}
void smpi_global_destroy()
{
  int count = smpi_process_count();

  smpi_bench_destroy();
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
    MSG_barrier_destroy(process_data[0]->finalization_barrier);
  }
  smpi_deployment_cleanup_instances();

  for (int i = 0; i < count; i++) {
    if(process_data[i]->comm_self!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_self);
    }
    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_intra);
    }
    xbt_os_timer_free(process_data[i]->timer);
    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
    delete process_data[i];
  }
  delete[] process_data;
  process_data = nullptr;

  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
    if(smpi_coll_cleanup_callback!=nullptr)
      smpi_coll_cleanup_callback();
    xbt_free(MPI_COMM_WORLD);
  }

  MPI_COMM_WORLD = MPI_COMM_NULL;

  if (!MC_is_active()) {
    xbt_os_timer_free(global_timer);
  }

  xbt_free(index_to_process_data);
  if(smpi_type_keyvals!=nullptr)
    xbt_dict_free(&smpi_type_keyvals);
  if(smpi_comm_keyvals!=nullptr)
    xbt_dict_free(&smpi_comm_keyvals);
  if(smpi_privatize_global_variables)
    smpi_destroy_global_memory_segments();
}
void __attribute__ ((weak)) user_main_()
{
  xbt_die("Should not be in this smpi_simulated_main");
}

int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
  smpi_process_init(&argc, &argv);
  user_main_();
  return 0;
}

inline static int smpi_main_wrapper(int argc, char **argv){
  int ret = smpi_simulated_main_(argc, argv);
  if (ret != 0) {
    XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
    smpi_process_data()->return_value = ret;
  }
  return 0;
}

int __attribute__ ((weak)) main(int argc, char **argv)
{
  return smpi_main(smpi_main_wrapper, argc, argv);
}
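
/* These weak definitions are meant to be overridden at link time by the
 * application: when a program is built with smpicc, the user's main() is
 * (presumably, the renaming happens in the SMPI headers/scripts, not in this
 * file) turned into a strong smpi_simulated_main_, so the weak main() above
 * takes over and routes execution through smpi_main(). */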
static void smpi_init_logs(){

  /* Connect log categories. See xbt/log.c */

  XBT_LOG_CONNECT(smpi);  /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
                             DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
  XBT_LOG_CONNECT(instr_smpi);
  XBT_LOG_CONNECT(smpi_base);
  XBT_LOG_CONNECT(smpi_bench);
  XBT_LOG_CONNECT(smpi_coll);
  XBT_LOG_CONNECT(smpi_colls);
  XBT_LOG_CONNECT(smpi_comm);
  XBT_LOG_CONNECT(smpi_dvfs);
  XBT_LOG_CONNECT(smpi_group);
  XBT_LOG_CONNECT(smpi_kernel);
  XBT_LOG_CONNECT(smpi_mpi);
  XBT_LOG_CONNECT(smpi_mpi_dt);
  XBT_LOG_CONNECT(smpi_pmpi);
  XBT_LOG_CONNECT(smpi_replay);
  XBT_LOG_CONNECT(smpi_rma);
}
static void smpi_init_options(){
  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
  mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
      (mpi_coll_gather_description[gather_id].coll);

  int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                           xbt_cfg_get_string("smpi/allgather"),"allgather");
  mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgather_description[allgather_id].coll);

  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                            xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
  mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgatherv_description[allgatherv_id].coll);

  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                           xbt_cfg_get_string("smpi/allreduce"),"allreduce");
  mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_allreduce_description[allreduce_id].coll);

  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                          xbt_cfg_get_string("smpi/alltoall"),"alltoall");
  mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoall_description[alltoall_id].coll);

  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                           xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
  mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoallv_description[alltoallv_id].coll);

  int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
  mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
      (mpi_coll_bcast_description[bcast_id].coll);

  int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
  mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
      (mpi_coll_reduce_description[reduce_id].coll);

  int reduce_scatter_id = find_coll_description(mpi_coll_reduce_scatter_description,
                                                xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
  mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);

  int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
  mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
      (mpi_coll_scatter_description[scatter_id].coll);

  int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
  mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
      (mpi_coll_barrier_description[barrier_id].coll);
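
  /* Each collective is thus selected by name through its configuration option,
   * e.g. (algorithm names are illustrative):
   *   --cfg=smpi/alltoall:pair --cfg=smpi/bcast:binomial_tree */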
  smpi_coll_cleanup_callback = nullptr;
  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
  smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
  if (smpi_cpu_threshold < 0)
    smpi_cpu_threshold = DBL_MAX;

  char* val = xbt_cfg_get_string("smpi/shared-malloc");
  if (!strcasecmp(val, "yes") || !strcmp(val, "1") || !strcasecmp(val, "on") || !strcasecmp(val, "global")) {
    smpi_cfg_shared_malloc = shmalloc_global;
  } else if (!strcasecmp(val, "local")) {
    smpi_cfg_shared_malloc = shmalloc_local;
  } else if (!strcasecmp(val, "no") || !strcmp(val, "0") || !strcasecmp(val, "off")) {
    smpi_cfg_shared_malloc = shmalloc_none;
  } else {
    xbt_die("Invalid value '%s' for option smpi/shared-malloc. Possible values: 'on' or 'global', 'local', 'off'",
            val);
  }
}
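
/* Based on the strings parsed above, the shared-malloc policy is selected with
 * e.g. --cfg=smpi/shared-malloc:global (one shared segment backing all
 * SMPI_SHARED_MALLOC allocations), "local", or "off". */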
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
{
  srand(SMPI_RAND_SEED);

  if (getenv("SMPI_PRETEND_CC") != nullptr) {
    /* Hack to ensure that smpicc can pretend to be a simple compiler. Particularly handy to pass it to the
     * configuration tools */
    return 0;
  }

  smpi_init_logs();
  TRACE_global_init(&argc, argv);
  TRACE_add_start_function(TRACE_smpi_alloc);
  TRACE_add_end_function(TRACE_smpi_release);

  SIMIX_global_init(&argc, argv);
  MSG_init(&argc, argv);

  SMPI_switch_data_segment = &smpi_switch_data_segment;

  smpi_init_options();

  // parse the platform file: get the host list
  SIMIX_create_environment(argv[1]);
  SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
  SIMIX_function_register_default(realmain);
  SIMIX_launch_application(argv[2]);

  smpi_global_init();

  smpi_check_options();

  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

  /* Clean IO before the run */
  fflush(stdout);
  fflush(stderr);

  if (MC_is_active()) {
    MC_run();
  } else {
    SIMIX_run();

    xbt_os_walltimer_stop(global_timer);
    if (xbt_cfg_get_boolean("smpi/display-timing")){
      double global_time = xbt_os_timer_elapsed(global_timer);
      XBT_INFO("Simulated time: %g seconds. \n\n"
               "The simulation took %g seconds (after parsing and platform setup)\n"
               "%g seconds were actual computation of the application",
               SIMIX_get_clock(), global_time, smpi_total_benched_time);

      if (smpi_total_benched_time/global_time >= 0.75)
        XBT_INFO("More than 75%% of the time was spent inside the application code.\n"
                 "You may want to use sampling functions or trace replay to reduce this.");
    }
  }

  int count = smpi_process_count();
  int i, ret = 0;
  for (i = 0; i < count; i++) {
    if(process_data[i]->return_value!=0){
      ret = process_data[i]->return_value; // return the first non-zero value found
      break;
    }
  }
  smpi_global_destroy();

  return ret;
}
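
/* smpi_main() expects argv[1] to name the platform file and argv[2] the
 * deployment file (see SIMIX_create_environment() and
 * SIMIX_launch_application() above). The smpirun script normally generates
 * both files and invokes the simulated binary accordingly; something like
 *   smpirun -np 4 -platform platform.xml ./app
 * (illustrative command line). */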
// This function can be called from external code to initialize the logs, options, and processes of SMPI,
// without requiring smpirun
void SMPI_init(){
  smpi_init_logs();
  smpi_init_options();
  smpi_global_init();
  smpi_check_options();
  if (TRACE_is_enabled() && TRACE_is_configured())
    TRACE_smpi_alloc();
  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();
}

void SMPI_finalize(){
  smpi_global_destroy();
}
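
/* A hedged usage sketch for embedding SMPI without smpirun, based only on the
 * two entry points above:
 *
 *   SMPI_init();        // logs, options, process data, option checking
 *   // ... register and run the MPI instances ...
 *   SMPI_finalize();    // tear everything down
 */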