/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_mpi_dt_private.h"
#include "src/mc/mc_record.h"
#include "xbt/replay.h"
#include "surf/surf.h"
#include "src/simix/smx_private.h"
#include "simgrid/sg_config.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/kernel/activity/SynchroComm.hpp"

#include <float.h> /* DBL_MAX */
#include <fstream>
#include <iterator>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
const char* papi_default_config_name = "default";

struct papi_process_data {
  papi_counter_t counter_data;
  int event_set;
};
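
/* Maps a call location (as tracked by smpi_trace_call_location_t) to a
 * user-provided speedup factor; filled from the "smpi/comp-adjustment-file"
 * CSV in smpi_global_init(). */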
std::unordered_map<std::string, double> location2speedup;
typedef struct s_smpi_process_data {
  double simulated;
  int *argc;
  char ***argv;
  smx_mailbox_t mailbox;
  smx_mailbox_t mailbox_small;
  xbt_mutex_t mailboxes_mutex;
  xbt_os_timer_t timer;
  MPI_Comm comm_self;
  MPI_Comm comm_intra;
  MPI_Comm* comm_world;
  void *data;   /* user data */
  int index;
  char state;
  int sampling; /* inside an SMPI_SAMPLE_ block? */
  char* instance_id;
  bool replaying; /* is the process replaying a trace */
  xbt_bar_t finalization_barrier;
  int return_value;
  smpi_trace_call_location_t trace_call_loc;
  /** Contains hardware data as read by PAPI **/
  int papi_event_set;
  papi_counter_t papi_counter_data;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
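
/* A mailbox name is a 5-char prefix ("SMPI-" or "small"), the rank index as
 * zero-padded hex (two hex digits per byte of an int), and the trailing NUL. */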
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
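
/* Both helpers below write into caller-owned storage: smpi_global_init()
 * passes a single stack buffer of MAILBOX_NAME_MAXLEN bytes and reuses it for
 * every rank. */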
static char *get_mailbox_name(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), index);
  return str;
}

static char *get_mailbox_name_small(char *str, int index)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), index);
  return str;
}
void smpi_process_init(int *argc, char ***argv)
{
  if (argc != nullptr && argv != nullptr) {
    smx_process_t proc = SIMIX_process_self();
    proc->context->set_cleanup(MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    int index = smpi_process_index_of_smx_process(proc);

    if(index_to_process_data == nullptr){
      index_to_process_data = static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
    }

    if(smpi_privatize_global_variables){
      /* Now using segment index of the process */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
    }

    MPI_Comm* temp_comm_world;
    xbt_bar_t temp_bar;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    smpi_process_data_t data = smpi_process_remote_data(index);
    data->comm_world  = temp_comm_world;
    if(temp_bar != nullptr)
      data->finalization_barrier = temp_bar;
    data->index       = index;
    data->instance_id = instance_id;
    data->replaying   = false;

    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
    simdata->data = data;
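
    /* Drop the two bootstrap arguments (instance id and rank, prepended by the
     * deployment) so that the application only sees its own arguments. */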
    if (*argc > 3) {
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
      (*argv)[(*argc) - 1] = nullptr;
      (*argv)[(*argc) - 2] = nullptr;
    }
    (*argc) -= 2;
    data->argc = argc;
    data->argv = argv;
    // set the process attached to the mailbox
    simcall_mbox_set_receiver(data->mailbox_small, proc);
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  }
  xbt_assert(smpi_process_data(),
      "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
      "Although it's required by MPI-2, this is currently not supported by SMPI.");
}
void smpi_process_destroy()
{
  int index = smpi_process_index();
  if(smpi_privatize_global_variables){
    smpi_switch_data_segment(index);
  }
  process_data[index_to_process_data[index]]->state = SMPI_FINALIZED;
  XBT_DEBUG("<%d> Process left the game", index);
}
/** @brief Prepares the current process for termination. */
void smpi_process_finalize()
{
  // This leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
    return;

  int index = smpi_process_index();
  // wait for all pending asynchronous comms to finish
  xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
}
/** @brief Check if a process is finalized */
int smpi_process_finalized()
{
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED);
  else
    return 0;
}
/** @brief Check if a process is initialized */
int smpi_process_initialized()
{
  if (index_to_process_data == nullptr){
    return false;
  }
  int index = smpi_process_index();
  return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED));
}
/** @brief Mark a process as initialized (=MPI_Init called) */
void smpi_process_mark_as_initialized()
{
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED;
}
void smpi_process_set_replaying(bool value){
  int index = smpi_process_index();
  if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED))
    process_data[index_to_process_data[index]]->replaying = value;
}

bool smpi_process_get_replaying(){
  int index = smpi_process_index();
  if (index != MPI_UNDEFINED)
    return process_data[index_to_process_data[index]]->replaying;
  else
    return (_xbt_replay_is_active() != 0);
}
int smpi_global_size()
{
  char *value = getenv("SMPI_GLOBAL_SIZE");
  xbt_assert(value, "Please set env var SMPI_GLOBAL_SIZE to the expected number of processes.");

  return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s");
}
smpi_process_data_t smpi_process_data()
{
  simdata_process_t simdata = static_cast<simdata_process_t>(SIMIX_process_self_get_data());
  return static_cast<smpi_process_data_t>(simdata->data);
}

smpi_process_data_t smpi_process_remote_data(int index)
{
  return process_data[index_to_process_data[index]];
}

void smpi_process_set_user_data(void *data)
{
  smpi_process_data_t process_data = smpi_process_data();
  process_data->data = data;
}

void *smpi_process_get_user_data()
{
  smpi_process_data_t process_data = smpi_process_data();
  return process_data->data;
}

int smpi_process_count()
{
  return process_count;
}
/**
 * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* smpi_process_get_call_location()
{
  smpi_process_data_t process_data = smpi_process_data();
  return &process_data->trace_call_loc;
}
int smpi_process_index()
{
  smpi_process_data_t data = smpi_process_data();
  //return -1 if not initialized
  return data != nullptr ? data->index : MPI_UNDEFINED;
}

MPI_Comm smpi_process_comm_world()
{
  smpi_process_data_t data = smpi_process_data();
  //return MPI_COMM_NULL if not initialized
  return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
}
smx_mailbox_t smpi_process_mailbox()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox;
}

smx_mailbox_t smpi_process_mailbox_small()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailbox_small;
}

xbt_mutex_t smpi_process_mailboxes_mutex()
{
  smpi_process_data_t data = smpi_process_data();
  return data->mailboxes_mutex;
}

smx_mailbox_t smpi_process_remote_mailbox(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox;
}

smx_mailbox_t smpi_process_remote_mailbox_small(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailbox_small;
}

xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
{
  smpi_process_data_t data = smpi_process_remote_data(index);
  return data->mailboxes_mutex;
}
int smpi_process_papi_event_set(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_event_set;
}

papi_counter_t& smpi_process_papi_counters(void)
{
  smpi_process_data_t data = smpi_process_data();
  return data->papi_counter_data;
}
xbt_os_timer_t smpi_process_timer()
{
  smpi_process_data_t data = smpi_process_data();
  return data->timer;
}

void smpi_process_simulated_start()
{
  smpi_process_data_t data = smpi_process_data();
  data->simulated = SIMIX_get_clock();
}

double smpi_process_simulated_elapsed()
{
  smpi_process_data_t data = smpi_process_data();
  return SIMIX_get_clock() - data->simulated;
}
MPI_Comm smpi_process_comm_self()
{
  smpi_process_data_t data = smpi_process_data();
  if(data->comm_self==MPI_COMM_NULL){
    MPI_Group group = smpi_group_new(1);
    data->comm_self = smpi_comm_new(group, nullptr);
    smpi_group_set_mapping(group, smpi_process_index(), 0);
  }
  return data->comm_self;
}

MPI_Comm smpi_process_get_comm_intra()
{
  smpi_process_data_t data = smpi_process_data();
  return data->comm_intra;
}

void smpi_process_set_comm_intra(MPI_Comm comm)
{
  smpi_process_data_t data = smpi_process_data();
  data->comm_intra = comm;
}
void smpi_process_set_sampling(int s)
{
  smpi_process_data_t data = smpi_process_data();
  data->sampling = s;
}

int smpi_process_get_sampling()
{
  smpi_process_data_t data = smpi_process_data();
  return data->sampling;
}
void print_request(const char *message, MPI_Request request)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
}
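
/* Copy callback used by SIMIX for SMPI communications. With privatized global
 * variables, source and destination processes each have their own copy of the
 * executable's data segment, and only one copy is mapped at a time; a buffer
 * that lives inside that segment is therefore staged through a temporary
 * buffer while the matching segment is switched in. */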
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff = buff;
  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);

  if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");

    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
    memcpy(tmpbuff, buff, buff_size);
  }

  if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));
  }

  memcpy(comm->dst_buff, tmpbuff, buff_size);

  if (comm->detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    //It seems that the request is used after the call there this should be free somewhere else but where???
    //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
    comm->src_buff = nullptr;
  }

  if (tmpbuff != buff)
    xbt_free(tmpbuff);
}
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
{
  return;
}
static void smpi_check_options(){
  //check correctness of MPI parameters

  xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));

  if (xbt_cfg_is_default_value("smpi/running-power")) {
    XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
             "Use the option \"--cfg=smpi/running-power:<flops>\" to set its value. "
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
  }
}
int smpi_enabled() {
  return process_data != nullptr;
}
void smpi_global_init()
{
  int i;
  MPI_Group group;
  char name[MAILBOX_NAME_MAXLEN];
  int smpirun = 0;

  if (!MC_is_active()) {
    global_timer = xbt_os_timer_new();
    xbt_os_walltimer_start(global_timer);
  }
455 if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
456 std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
457 std::ifstream fstream(filename);
458 if (!fstream.is_open()) {
459 xbt_die("Could not open file %s. Does it exist?", filename.c_str());
463 typedef boost::tokenizer< boost::escaped_list_separator<char>> Tokenizer;
464 std::getline(fstream, line); // Skip the header line
465 while (std::getline(fstream, line)) {
467 Tokenizer::iterator it = tok.begin();
468 Tokenizer::iterator end = std::next(tok.begin());
470 std::string location = *it;
471 boost::trim(location);
472 location2speedup.insert(std::pair<std::string, double>(location, std::stod(*end)));
  // This map holds for each computation unit (such as "default" or "process1" etc.)
  // the configuration as given by the user (counter data as a pair of (counter_name, counter_value))
  // and the (computed) event_set.
  std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;
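
  // The expected "smpi/papi-events" format, as parsed below: units separated
  // by ';', and within a unit its name and its counters separated by ':'.
  // The counter names here are only illustrative PAPI event names, e.g.
  //   --cfg=smpi/papi-events:"default:PAPI_TOT_INS:PAPI_L1_DCM;process1:PAPI_TOT_CYC;"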
482 if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
483 if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
484 XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
485 " Expected version is %i",
488 typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
489 boost::char_separator<char> separator_units(";");
490 std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
491 Tokenizer tokens(str, separator_units);
493 // Iterate over all the computational units. This could be
494 // processes, hosts, threads, ranks... You name it. I'm not exactly
495 // sure what we will support eventually, so I'll leave it at the
496 // general term "units".
497 for (auto& unit_it : tokens) {
498 boost::char_separator<char> separator_events(":");
499 Tokenizer event_tokens(unit_it, separator_events);
501 int event_set = PAPI_NULL;
502 if (PAPI_create_eventset(&event_set) != PAPI_OK) {
503 // TODO: Should this let the whole simulation die?
504 XBT_CRITICAL("Could not create PAPI event set during init.");
507 // NOTE: We cannot use a map here, as we must obey the order of the counters
508 // This is important for PAPI: We need to map the values of counters back
509 // to the event_names (so, when PAPI_read() has finished)!
510 papi_counter_t counters2values;
512 // Iterate over all counters that were specified for this specific
514 // Note that we need to remove the name of the unit
515 // (that could also be the "default" value), which always comes first.
516 // Hence, we start at ++(events.begin())!
517 for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); events_it++) {
519 int event_code = PAPI_NULL;
520 char* event_name = const_cast<char*>((*events_it).c_str());
521 if (PAPI_event_name_to_code(event_name, &event_code) == PAPI_OK) {
522 if (PAPI_add_event(event_set, event_code) != PAPI_OK) {
523 XBT_ERROR("Could not add PAPI event '%s'. Skipping.", event_name);
526 XBT_DEBUG("Successfully added PAPI event '%s' to the event set.", event_name);
529 XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);
533 counters2values.push_back(
534 // We cannot just pass *events_it, as this is of type const basic_string
535 std::make_pair<std::string, long long>(std::string(*events_it), 0));
538 std::string unit_name = *(event_tokens.begin());
539 papi_process_data config = {.counter_data = std::move(counters2values), .event_set = event_set};
541 units2papi_setup.insert(std::make_pair(unit_name, std::move(config)));
  if (process_count == 0){
    process_count = SIMIX_process_count();
    smpirun = 1;
  }
  smpi_universe_size = process_count;
  process_data       = new smpi_process_data_t[process_count];
  for (i = 0; i < process_count; i++) {
    process_data[i]                       = new s_smpi_process_data_t;
    process_data[i]->argc                 = nullptr;
    process_data[i]->argv                 = nullptr;
    process_data[i]->mailbox              = simcall_mbox_create(get_mailbox_name(name, i));
    process_data[i]->mailbox_small        = simcall_mbox_create(get_mailbox_name_small(name, i));
    process_data[i]->mailboxes_mutex      = xbt_mutex_init();
    process_data[i]->timer                = xbt_os_timer_new();
    if (MC_is_active())
      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self            = MPI_COMM_NULL;
    process_data[i]->comm_intra           = MPI_COMM_NULL;
    process_data[i]->comm_world           = nullptr;
    process_data[i]->state                = SMPI_UNINITIALIZED;
    process_data[i]->sampling             = 0;
    process_data[i]->finalization_barrier = nullptr;
    process_data[i]->return_value         = 0;

    if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
      // TODO: Implement host/process/thread based counters. This implementation
      // just always takes the values passed via "default", like this:
      // "default:COUNTER1:COUNTER2:COUNTER3;".
      auto it = units2papi_setup.find(papi_default_config_name);
      if (it != units2papi_setup.end()) {
        process_data[i]->papi_event_set    = it->second.event_set;
        process_data[i]->papi_counter_data = it->second.counter_data;
        XBT_DEBUG("Setting PAPI set for process %i", i);
      } else {
        process_data[i]->papi_event_set = PAPI_NULL;
        XBT_DEBUG("No PAPI set for process %i", i);
      }
    }
  }
  //if the process was launched through smpirun script we generate a global mpi_comm_world
  //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
  if (smpirun) {
    group = smpi_group_new(process_count);
    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
    xbt_bar_t bar = xbt_barrier_init(process_count);

    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
      process_data[i]->finalization_barrier = bar;
    }
  }
}
void smpi_global_destroy()
{
  int count = smpi_process_count();
  int i;

  smpi_bench_destroy();
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
    xbt_barrier_destroy(process_data[0]->finalization_barrier);
  }else{
    smpi_deployment_cleanup_instances();
  }
  for (i = 0; i < count; i++) {
    if(process_data[i]->comm_self!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_self);
    }
    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
      smpi_comm_destroy(process_data[i]->comm_intra);
    }
    xbt_os_timer_free(process_data[i]->timer);
    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
    delete process_data[i];
  }
  delete[] process_data;
  process_data = nullptr;

  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
    if(smpi_coll_cleanup_callback!=nullptr)
      smpi_coll_cleanup_callback();
    xbt_free(MPI_COMM_WORLD);
  }

  MPI_COMM_WORLD = MPI_COMM_NULL;

  if (!MC_is_active()) {
    xbt_os_timer_free(global_timer);
  }

  xbt_free(index_to_process_data);
  if(smpi_privatize_global_variables)
    smpi_destroy_global_memory_segments();
}
void __attribute__ ((weak)) user_main_()
{
  xbt_die("Should not be in this smpi_simulated_main");
}

int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
  smpi_process_init(&argc, &argv);
  user_main_();
  return 0;
}

inline static int smpi_main_wrapper(int argc, char **argv){
  int ret = smpi_simulated_main_(argc, argv);
  if (ret != 0) {
    XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
    smpi_process_data()->return_value = ret;
  }
  return 0;
}

int __attribute__ ((weak)) main(int argc, char **argv)
{
  return smpi_main(smpi_main_wrapper, argc, argv);
}
static void smpi_init_logs(){

  /* Connect log categories. See xbt/log.c */

  XBT_LOG_CONNECT(smpi);  /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
                             DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
  XBT_LOG_CONNECT(instr_smpi);
  XBT_LOG_CONNECT(smpi_base);
  XBT_LOG_CONNECT(smpi_bench);
  XBT_LOG_CONNECT(smpi_coll);
  XBT_LOG_CONNECT(smpi_colls);
  XBT_LOG_CONNECT(smpi_comm);
  XBT_LOG_CONNECT(smpi_dvfs);
  XBT_LOG_CONNECT(smpi_group);
  XBT_LOG_CONNECT(smpi_kernel);
  XBT_LOG_CONNECT(smpi_mpi);
  XBT_LOG_CONNECT(smpi_mpi_dt);
  XBT_LOG_CONNECT(smpi_pmpi);
  XBT_LOG_CONNECT(smpi_replay);
  XBT_LOG_CONNECT(smpi_rma);
}
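
/* Each "smpi/<collective>" option names an entry of the matching
 * mpi_coll_*_description table; find_coll_description() returns the index of
 * that entry, and the stored generic function pointer is cast back to the
 * collective's actual signature. */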
static void smpi_init_options(){
  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"), "gather");
  mpi_coll_gather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)>
      (mpi_coll_gather_description[gather_id].coll);

  int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                           xbt_cfg_get_string("smpi/allgather"), "allgather");
  mpi_coll_allgather_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgather_description[allgather_id].coll);

  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                            xbt_cfg_get_string("smpi/allgatherv"), "allgatherv");
  mpi_coll_allgatherv_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_allgatherv_description[allgatherv_id].coll);

  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                           xbt_cfg_get_string("smpi/allreduce"), "allreduce");
  mpi_coll_allreduce_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_allreduce_description[allreduce_id].coll);

  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                          xbt_cfg_get_string("smpi/alltoall"), "alltoall");
  mpi_coll_alltoall_fun = reinterpret_cast<int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoall_description[alltoall_id].coll);

  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                           xbt_cfg_get_string("smpi/alltoallv"), "alltoallv");
  mpi_coll_alltoallv_fun = reinterpret_cast<int (*)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)>
      (mpi_coll_alltoallv_description[alltoallv_id].coll);

  int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"), "bcast");
  mpi_coll_bcast_fun = reinterpret_cast<int (*)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com)>
      (mpi_coll_bcast_description[bcast_id].coll);

  int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"), "reduce");
  mpi_coll_reduce_fun = reinterpret_cast<int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)>
      (mpi_coll_reduce_description[reduce_id].coll);

  int reduce_scatter_id = find_coll_description(mpi_coll_reduce_scatter_description,
                                                xbt_cfg_get_string("smpi/reduce-scatter"), "reduce_scatter");
  mpi_coll_reduce_scatter_fun = reinterpret_cast<int (*)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)>
      (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);

  int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"), "scatter");
  mpi_coll_scatter_fun = reinterpret_cast<int (*)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)>
      (mpi_coll_scatter_description[scatter_id].coll);

  int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"), "barrier");
  mpi_coll_barrier_fun = reinterpret_cast<int (*)(MPI_Comm comm)>
      (mpi_coll_barrier_description[barrier_id].coll);

  smpi_coll_cleanup_callback = nullptr;
  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
  smpi_running_power = xbt_cfg_get_double("smpi/running-power");
  smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
  if (smpi_cpu_threshold < 0)
    smpi_cpu_threshold = DBL_MAX;
}
int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
{
  srand(SMPI_RAND_SEED);

  if (getenv("SMPI_PRETEND_CC") != nullptr) {
    /* Hack to ensure that smpicc can pretend to be a simple compiler. Particularly handy to pass it to the
     * configuration tools */
    return 0;
  }
  smpi_init_logs();

  TRACE_global_init(&argc, argv);
  TRACE_add_start_function(TRACE_smpi_alloc);
  TRACE_add_end_function(TRACE_smpi_release);

  SIMIX_global_init(&argc, argv);
  MSG_init(&argc, argv);

  SMPI_switch_data_segment = smpi_switch_data_segment;

  smpi_init_options();

  // parse the platform file: get the host list
  SIMIX_create_environment(argv[1]);
  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
  SIMIX_function_register_default(realmain);
  SIMIX_launch_application(argv[2]);

  smpi_global_init();

  smpi_check_options();

  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();

  /* Clean IO before the run */
  fflush(stdout);
  fflush(stderr);

  if (MC_is_active()) {
    MC_run();
  } else {
    SIMIX_run();

    xbt_os_walltimer_stop(global_timer);
    if (xbt_cfg_get_boolean("smpi/display-timing")){
      double global_time = xbt_os_timer_elapsed(global_timer);
      XBT_INFO("Simulated time: %g seconds. \n\n"
               "The simulation took %g seconds (after parsing and platform setup)\n"
               "%g seconds were actual computation of the application",
               SIMIX_get_clock(), global_time, smpi_total_benched_time);

      if (smpi_total_benched_time/global_time >= 0.75)
        XBT_INFO("More than 75%% of the time was spent inside the application code.\n"
                 "You may want to use sampling functions or trace replay to reduce this.");
    }
  }
  int count = smpi_process_count();
  int i, ret = 0;
  for (i = 0; i < count; i++) {
    if(process_data[i]->return_value!=0){
      ret = process_data[i]->return_value; //return first non 0 value
      break;
    }
  }
  smpi_global_destroy();

  TRACE_end();

  return ret;
}
// This function can be called from extern file, to initialize logs, options, and processes of smpi
// without the need of smpirun
void SMPI_init(){
  smpi_init_logs();
  smpi_init_options();
  smpi_global_init();
  smpi_check_options();
  if (TRACE_is_enabled() && TRACE_is_configured())
    TRACE_smpi_alloc();
  if(smpi_privatize_global_variables)
    smpi_initialize_global_memory_segments();
}

void SMPI_finalize(){
  smpi_global_destroy();
}