1 /* Copyright (c) 2007, 2009-2017. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
8 #include <unordered_map>
11 #include "src/internal_config.h"
13 #include "private.hpp"
16 #include "xbt/sysdep.h"
18 #include "surf/surf.h"
19 #include "simgrid/sg_config.h"
20 #include "simgrid/modelchecker.h"
21 #include "src/mc/mc_replay.h"
23 #include <sys/types.h>
30 #include <math.h> // sqrt
40 #define MAP_ANONYMOUS MAP_ANON
43 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_bench, smpi, "Logging specific to SMPI (benchmarking)");
45 /* Shared allocations are handled through shared memory segments.
46 * Associated data and metadata are used as follows:
49 * `allocs' dict ---- -.
50 * ---------- shared_data_t shared_metadata_t / | | |
51 * .->| <name> | ---> -------------------- <--. ----------------- | | | |
52 * | ---------- | fd of <name> | | | size of mmap | --| | | |
53 * | | count (2) | |-- | data | \ | | |
54 * `----------------- | <name> | | ----------------- ---- |
55 * -------------------- | ^ |
57 * | | `allocs_metadata' dict |
58 * | | ---------------------- |
59 * | `-- | <addr of mmap #1> |<-'
60 * | .-- | <addr of mmap #2> |<-.
61 * | | ---------------------- |
67 * | shared_metadata_t / | |
68 * | ----------------- | | |
69 * | | size of mmap | --| | |
71 * ----------------- | | |
/* Length of the textual form of a pointer: "0x" + 2 hex digits per byte + NUL. */
#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)

xbt_dict_t samples = nullptr; /* Allocated on first use */
xbt_dict_t calls = nullptr; /* Allocated on first use */

double smpi_cpu_threshold;              // computations shorter than this (secs) are ignored by smpi_execute()
double smpi_host_speed;                 // flop rate used to convert benched wallclock time into flops

int smpi_loaded_page = -1;              // rank whose privatized data segment is currently mapped (-1: none yet)
char* smpi_start_data_exe = nullptr;    // start address of the executable's data+bss segment
int smpi_size_data_exe = 0;             // size of that segment in bytes (0 disables privatization)
bool smpi_privatize_global_variables;   // whether global-variable privatization is active
double smpi_total_benched_time = 0;     // accumulated real time spent benchmarking user code
smpi_privatisation_region_t smpi_privatisation_regions; // per-process mmap'ed copies of data+bss
93 /** Some location in the source code
95 * This information is used by SMPI_SHARED_MALLOC to allocate some shared memory for all simulated processes.
/** A call-site location (file + line) used as key of the `allocs' map, so that
 * every simulated process hitting the same SMPI_SHARED_MALLOC site shares one
 * memory segment.
 * NOTE(review): access specifiers, the `line' member declaration and the
 * closing `};' of this class are elided from this view. */
class smpi_source_location {
  // Duplicates the file name and caches its length for hashing/comparison.
  smpi_source_location(const char* filename, int line)
      : filename(xbt_strdup(filename)), filename_length(strlen(filename)), line(line) {}

  /** Pointer to a static string containing the file name */
  char* filename = nullptr;
  int filename_length = 0;  // cached strlen(filename), reused by operator== and std::hash

  // Equality on the cached length plus a raw byte compare of the names.
  // NOTE(review): an elided line presumably also compares `line' — confirm.
  bool operator==(smpi_source_location const& that) const
    return filename_length == that.filename_length
        && std::memcmp(filename, that.filename, filename_length) == 0;
  bool operator!=(smpi_source_location const& that) const
    return !(*this == that);
// std::hash specialization so smpi_source_location can key an unordered_map.
// NOTE(review): the enclosing `namespace std {', access specifier and the
// closing braces are elided from this view.
class hash<smpi_source_location> {
  typedef smpi_source_location argument_type;
  typedef std::size_t result_type;
  // Combine the hash of the file-name bytes with the hash of the line number.
  result_type operator()(smpi_source_location const& loc) const
    return xbt_str_hash_ext(loc.filename, loc.filename_length)
         ^ xbt_str_hash_ext((const char*) &loc.line, sizeof(loc.line));
// One shared_data_t per SMPI_SHARED_MALLOC call site, keyed by source location.
std::unordered_map<smpi_source_location, shared_data_t> allocs;
typedef std::unordered_map<smpi_source_location, shared_data_t>::value_type shared_data_key_type;

// NOTE(review): member of the shared_metadata_t struct whose declaration (and
// its `size' member) is elided from this view.
  shared_data_key_type* data;  // back-pointer into the owning `allocs' entry

// Metadata of every live shared mapping, keyed by the address mmap returned.
std::unordered_map<void*, shared_metadata_t> allocs_metadata;
/** Return the current size in bytes of the file behind `fd', aborting the
 * simulation on failure.
 * NOTE(review): the `struct stat st;' declaration and closing braces are
 * elided from this view. */
static size_t shm_size(int fd) {
  if(fstat(fd, &st) < 0) {
    xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
  return static_cast<size_t>(st.st_size);
/** Map the shared segment behind `fd' into this process, growing the backing
 * file to `size' bytes when needed, and register the mapping in
 * `allocs_metadata' for later smpi_shared_free().
 * NOTE(review): the xbt_die() call opening the mmap error message, the lines
 * filling `meta' (size, data back-pointer) and the final `return mem;' are
 * elided from this view. */
static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
  char loc[PTR_STRLEN];
  shared_metadata_t meta;

  // Grow the backing file if this call site asks for more than ever before.
  if(size > shm_size(fd) && (ftruncate(fd, static_cast<off_t>(size)) < 0)) {
    xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));

  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if(mem == MAP_FAILED) {
        "Failed to map fd %d with size %zu: %s\n"
        "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
        "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
        "Please see http://simgrid.gforge.inria.fr/simgrid/latest/doc/html/options.html#options_virt for more info.",
        fd, size, strerror(errno));
  snprintf(loc, PTR_STRLEN, "%p", mem);
  // Record the mapping so smpi_shared_free() can find its size and owner.
  allocs_metadata[mem] = meta;
  XBT_DEBUG("MMAP %zu to %p", size, mem);
/** Release the global benchmarking state: shared-malloc metadata and the
 * sample/call dictionaries (xbt_dict_free is a no-op on nullptr dicts). */
void smpi_bench_destroy()
  allocs_metadata.clear();
  xbt_dict_free(&samples);
  xbt_dict_free(&calls);
200 extern "C" XBT_PUBLIC(void) smpi_execute_flops_(double *flops);
201 void smpi_execute_flops_(double *flops)
203 smpi_execute_flops(*flops);
206 extern "C" XBT_PUBLIC(void) smpi_execute_(double *duration);
207 void smpi_execute_(double *duration)
209 smpi_execute(*duration);
212 void smpi_execute_flops(double flops) {
213 XBT_DEBUG("Handle real computation time: %f flops", flops);
214 smx_activity_t action = simcall_execution_start("computation", flops, 1, 0);
215 simcall_set_category (action, TRACE_internal_smpi_get_category());
216 simcall_execution_wait(action);
217 smpi_switch_data_segment(smpi_process_index());
/** Convert `duration' seconds of real computation into simulated flops, or
 * skip it entirely when below the smpi/cpu-threshold option.
 * NOTE(review): the `} else {' lines and the function's closing brace are
 * elided from this view. */
void smpi_execute(double duration)
  if (duration >= smpi_cpu_threshold) {
    XBT_DEBUG("Sleep for %g to handle real computation time", duration);
    double flops = duration * smpi_host_speed;  // wallclock -> flops conversion
    int rank = smpi_process_index();
    // Tracing: surround the simulated execution with a COMPUTING state.
    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
    extra->type=TRACING_COMPUTING;
    extra->comp_size=flops;
    TRACE_smpi_computing_in(rank, extra);

    smpi_execute_flops(flops);

    TRACE_smpi_computing_out(rank);
    // else branch (elided above): too small to be worth simulating.
    XBT_DEBUG("Real computation took %g while option smpi/cpu_threshold is set to %g => ignore it",
              duration, smpi_cpu_threshold);
/** Start benchmarking a section of user code: remap this rank's private data
 * segment, optionally start PAPI counters, and restart the thread timer.
 * NOTE(review): several braces, an early `return' after the model-checker
 * test, and presumably #if HAVE_PAPI guards are elided from this view. */
void smpi_bench_begin()
  if (smpi_privatize_global_variables) {
    smpi_switch_data_segment(smpi_process_index());

  // Under the model-checker / replay, time must stay reproducible: no bench.
  if (MC_is_active() || MC_record_replay_is_active())

  // Start hardware counters when the user configured smpi/papi-events.
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    int event_set = smpi_process_papi_event_set();
    // PAPI_start sets everything to 0! See man(3) PAPI_start
    if (PAPI_LOW_LEVEL_INITED == PAPI_is_initialized()) {
      if (PAPI_start(event_set) != PAPI_OK) {
        // TODO This needs some proper handling.
        XBT_CRITICAL("Could not start PAPI counters.\n");

  xbt_os_threadtimer_start(smpi_process_timer());
/** Stop benchmarking: measure the real time elapsed since smpi_bench_begin(),
 * optionally adjust it by a per-location speedup factor, and inject it into
 * the simulator. Also harvests/export PAPI counters when configured.
 * NOTE(review): many structural lines are elided from this view — braces, an
 * early `return' under the model-checker, #if HAVE_PAPI guards, and the
 * declaration of `speedup' (presumably `double speedup = 1;'). */
void smpi_bench_end()
  if (MC_is_active() || MC_record_replay_is_active())

  xbt_os_timer_t timer = smpi_process_timer();
  xbt_os_threadtimer_stop(timer);

  /*
   * An MPI function has been called and now is the right time to update
   * our PAPI counters for this process.
   */
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    papi_counter_t& counter_data = smpi_process_papi_counters();
    int event_set = smpi_process_papi_event_set();
    std::vector<long long> event_values = std::vector<long long>(counter_data.size());

    if (PAPI_stop(event_set, &event_values[0]) != PAPI_OK) { // Error
      XBT_CRITICAL("Could not stop PAPI counters.\n");
    // Accumulate: PAPI_start zeroed the counters at bench begin.
    for (unsigned int i = 0; i < counter_data.size(); i++) {
      counter_data[i].second += event_values[i];
      // XBT_DEBUG("[%i] PAPI: Counter %s: Value is now %lli (got increment by %lli\n", smpi_process_index(),
      // counter_data[i].first.c_str(), counter_data[i].second, event_values[i]);

  // An MPI call nested inside a SMPI_SAMPLE_ block would corrupt the samples.
  if (smpi_process_get_sampling()) {
    XBT_CRITICAL("Cannot do recursive benchmarks.");
    XBT_CRITICAL("Are you trying to make a call to MPI within a SMPI_SAMPLE_ block?");
    xbt_backtrace_display_current();
    xbt_die("Aborting.");

  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') { // Maybe we need to artificially speed up or slow
    // down our computation based on our statistical analysis.

    smpi_trace_call_location_t* loc = smpi_process_get_call_location();
    std::string key = loc->get_composed_key();
    std::unordered_map<std::string, double>::const_iterator it = location2speedup.find(key);
    if (it != location2speedup.end()) {
      speedup = it->second;  // NOTE(review): `speedup' is declared on an elided line

  // Simulate the benchmarked computation unless disabled via command-line argument
  if (xbt_cfg_get_boolean("smpi/simulate-computation")) {
    smpi_execute(xbt_os_timer_elapsed(timer)/speedup);

  // Export the harvested PAPI counter values to the Paje trace.
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
    char container_name[INSTR_DEFAULT_STR_SIZE];
    smpi_container(smpi_process_index(), container_name, INSTR_DEFAULT_STR_SIZE);
    container_t container = PJ_container_get(container_name);
    papi_counter_t& counter_data = smpi_process_papi_counters();

    for (auto& pair : counter_data) {
      new_pajeSetVariable(surf_get_clock(), container,
                          PJ_type_get(/* countername */ pair.first.c_str(), container->type), pair.second);

  smpi_total_benched_time += xbt_os_timer_elapsed(timer);
/* Private sleep function used by smpi_sleep() and smpi_usleep() */
/* Sleeps `secs' seconds of simulated time, surrounded by tracing events.
 * NOTE(review): the opening brace, the smpi_bench_end()/begin() bracketing
 * and the `return 0;'-style tail are elided from this view. */
static unsigned int private_sleep(double secs)
  XBT_DEBUG("Sleep for: %lf secs", secs);
  int rank = smpi_comm_rank(MPI_COMM_WORLD);
  // Tracing: record a SLEEPING state around the simulated sleep.
  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
  extra->type=TRACING_SLEEPING;
  extra->sleep_duration=secs;
  TRACE_smpi_sleeping_in(rank, extra);

  simcall_process_sleep(secs);

  TRACE_smpi_sleeping_out(rank);
358 unsigned int smpi_sleep(unsigned int secs)
360 return private_sleep(static_cast<double>(secs));
363 int smpi_usleep(useconds_t usecs)
365 return static_cast<int>(private_sleep(static_cast<double>(usecs) / 1000000.0));
368 #if _POSIX_TIMERS > 0
369 int smpi_nanosleep(const struct timespec *tp, struct timespec * t)
371 return static_cast<int>(private_sleep(static_cast<double>(tp->tv_sec + tp->tv_nsec / 1000000000.0)));
/** Simulated replacement for gettimeofday(2), based on the simulated clock.
 * The timezone argument `tz' is ignored.
 * NOTE(review): the null-check on `tv', the #if/#else selecting between the
 * useconds_t and suseconds_t casts (only one line is compiled), the
 * `return 0;' and the closing brace are elided from this view. */
int smpi_gettimeofday(struct timeval *tv, void* tz)
  double now = SIMIX_get_clock();
  tv->tv_sec = static_cast<time_t>(now);  // whole simulated seconds
  // Sub-second remainder in microseconds; platform-dependent field type:
  tv->tv_usec = static_cast<useconds_t>((now - tv->tv_sec) * 1e6);
  tv->tv_usec = static_cast<suseconds_t>((now - tv->tv_sec) * 1e6);
#if _POSIX_TIMERS > 0
/** Simulated replacement for clock_gettime(2), based on the simulated clock.
 * NOTE(review): a null-check on `tp', the `return 0;', closing brace and
 * matching #endif are elided from this view. */
int smpi_clock_gettime(clockid_t clk_id, struct timespec *tp)
  //there is only one time in SMPI, so clk_id is ignored.
  double now = SIMIX_get_clock();
  tp->tv_sec = static_cast<time_t>(now);                          // whole seconds
  tp->tv_nsec = static_cast<long int>((now - tp->tv_sec) * 1e9);  // remainder in ns
extern double sg_surf_precision;  // surf's time precision (seconds per tick)
/** Number of clock ticks per simulated second for the Rastro trace format,
 * i.e. the inverse of the surf precision.
 * NOTE(review): opening/closing braces and any intermediate lines are elided
 * from this view. */
unsigned long long smpi_rastro_resolution ()
  double resolution = (1/sg_surf_precision);
  return static_cast<unsigned long long>(resolution);
/** Current simulated time expressed in Rastro ticks: whole seconds scaled by
 * the resolution, plus the fractional part converted to ticks.
 * NOTE(review): braces and possibly intermediate lines are elided here. */
unsigned long long smpi_rastro_timestamp ()
  double now = SIMIX_get_clock();

  unsigned long long sec = (unsigned long long)now;                // whole seconds
  unsigned long long pre = (now - sec) * smpi_rastro_resolution(); // fractional part in ticks
  return static_cast<unsigned long long>(sec) * smpi_rastro_resolution() + pre;
426 /* ****************************** Functions related to the SMPI_SAMPLE_ macros ************************************/
/* Per-sample-site statistics for the SMPI_SAMPLE_ machinery.
 * NOTE(review): the enclosing `typedef struct {' and the closing
 * `} local_data_t;' lines are elided from this view. */
  double threshold; /* maximal stderr requested (if positive) */
  double relstderr; /* observed stderr so far */
  double mean;      /* mean of benched times, to be used if the block is disabled */
  double sum;       /* sum of benched times (to compute the mean and stderr) */
  double sum_pow2;  /* sum of the square of the benched times (to compute the stderr) */
  int iters;        /* amount of requested iterations */
  int count;        /* amount of iterations done so far */
  int benching;     /* 1: we are benchmarking; 0: we have enough data, no bench anymore */
/** Build the dictionary key identifying a SMPI_SAMPLE_ site. A global sample
 * is shared by all ranks (file:line); a local one is additionally keyed by
 * the calling process index.
 * NOTE(review): the `if (global)' / `else' lines selecting between the two
 * returns, and the closing brace, are elided from this view. */
static char *sample_location(int global, const char *file, int line) {
    return bprintf("%s:%d", file, line);                            // global: one entry for all ranks
    return bprintf("%s:%d:%d", file, line, smpi_process_index());   // local: one entry per rank
/** Decide whether a sample site has gathered enough measurements: the
 * requested iteration count must be reached and, when a threshold is set,
 * the observed relative stderr must be below it.
 * NOTE(review): the condition guarding the first `res = 0' (presumably a
 * minimum-count check), the `return res;' and closing braces are elided. */
static int sample_enough_benchs(local_data_t *data) {
  int res = data->count >= data->iters;  // iteration budget reached?
  if (data->threshold>0.0) {
      res = 0; // not enough data
    if (data->relstderr > data->threshold)
      res = 0; // stderr too high yet
  XBT_DEBUG("%s (count:%d iter:%d stderr:%f thres:%f mean:%fs)",
      (res?"enough benchs":"need more data"), data->count, data->iters, data->relstderr, data->threshold, data->mean);
/** Enter a SMPI_SAMPLE_ block: close the previous bench, look up (or create)
 * the statistics record for this site and decide whether more benchmarking
 * is needed.
 * NOTE(review): braces, the `if (data == nullptr)' / `else' structure, part
 * of the field initializations (mean/sum/count) and the trailing free(loc)
 * are elided from this view. */
void smpi_sample_1(int global, const char *file, int line, int iters, double threshold)
  char *loc = sample_location(global, file, line);

  smpi_bench_end();     /* Take time from previous, unrelated computation into account */
  smpi_process_set_sampling(1);  // flag: we are inside a sample block now

  if (samples==nullptr)
    samples = xbt_dict_new_homogeneous(free);

  local_data_t *data = static_cast<local_data_t *>(xbt_dict_get_or_null(samples, loc));
  // First visit of this site: allocate and initialize its statistics record.
  xbt_assert(threshold>0 || iters>0,
      "You should provide either a positive amount of iterations to bench, or a positive maximal stderr (or both)");
  data = static_cast<local_data_t *>( xbt_new(local_data_t, 1));
  data->sum_pow2 = 0.0;
  data->threshold = threshold;
  data->benching = 1; // If we have no data, we need at least one
  xbt_dict_set(samples, loc, data, nullptr);
  XBT_DEBUG("XXXXX First time ever on benched nest %s.",loc);
  // Re-visit: the sampling parameters must not change between visits.
  if (data->iters != iters || data->threshold != threshold) {
    XBT_ERROR("Asked to bench block %s with different settings %d, %f is not %d, %f. "
              "How did you manage to give two numbers at the same line??",
              loc, data->iters, data->threshold, iters,threshold);

  // if we already have some data, check whether sample_2 should get one more bench or whether it should emulate
  // the computation instead
  data->benching = (sample_enough_benchs(data) == 0);
  XBT_DEBUG("XXXX Re-entering the benched nest %s. %s",loc,
      (data->benching?"more benching needed":"we have enough data, skip computes"));
/** Loop condition of a SMPI_SAMPLE_ block: return nonzero when one more real
 * bench iteration must run, zero when the recorded mean should be replayed.
 * NOTE(review): braces, the declaration of `res', the `res = 1' /
 * smpi_bench_begin() lines in the benching branch, the `} else {', the
 * free(loc) and `return res;' tail are elided from this view. */
int smpi_sample_2(int global, const char *file, int line)
  char *loc = sample_location(global, file, line);

  xbt_assert(samples, "Y U NO use SMPI_SAMPLE_* macros? Stop messing directly with smpi_sample_* functions!");
  local_data_t *data = static_cast<local_data_t *>(xbt_dict_get(samples, loc));
  XBT_DEBUG("sample2 %s",loc);

  if (data->benching==1) {
    // we need to run a new bench
    XBT_DEBUG("benchmarking: count:%d iter:%d stderr:%f thres:%f; mean:%f",
        data->count, data->iters, data->relstderr, data->threshold, data->mean);
    // Enough data, no more bench (either we got enough data from previous visits to this benched nest, or we just
    //ran one bench and need to bail out now that our job is done). Just sleep instead
    XBT_DEBUG("No benchmark (either no need, or just ran one): count >= iter (%d >= %d) or stderr<thres (%f<=%f)."
              " apply the %fs delay instead", data->count, data->iters, data->relstderr, data->threshold, data->mean);
    smpi_execute(data->mean);              // replay the recorded mean duration
    smpi_process_set_sampling(0);
    res = 0; // prepare to capture future, unrelated computations
/** End of one iteration of a SMPI_SAMPLE_ block: stop the timer, fold the
 * measured duration into the running mean/stderr statistics, and decide
 * whether sampling can stop.
 * NOTE(review): braces, the error path when `benching==0', the increment of
 * `data->count', the `data->sum += sample;' accumulation, the
 * `data->benching = 0;' after the comment on the last visible line, and the
 * free(loc) tail are all elided from this view. */
void smpi_sample_3(int global, const char *file, int line)
  char *loc = sample_location(global, file, line);

  xbt_assert(samples, "Y U NO use SMPI_SAMPLE_* macros? Stop messing directly with smpi_sample_* functions!");
  local_data_t *data = static_cast<local_data_t *>(xbt_dict_get(samples, loc));
  XBT_DEBUG("sample3 %s",loc);

  if (data->benching==0)

  // ok, benchmarking this loop is over
  xbt_os_threadtimer_stop(smpi_process_timer());

  // Update the stats on the fly (Welford-style incremental mean/stderr).
  double sample = xbt_os_timer_elapsed(smpi_process_timer());
  data->sum_pow2 += sample * sample;
  double n = static_cast<double>(data->count);
  data->mean = data->sum / n;
  data->relstderr = sqrt((data->sum_pow2 / n - data->mean * data->mean) / n) / data->mean;
  if (sample_enough_benchs(data)==0) {
    data->mean = sample; // Still in benching process; We want sample_2 to simulate the exact time of this loop
    // occurrence before leaving, not the mean over the history
  XBT_DEBUG("Average mean after %d steps is %f, relative standard error is %f (sample was %f)", data->count,
      data->mean, data->relstderr, sample);

  // That's enough for now, prevent sample_2 to run the same code over and over
/** Allocate `size' bytes that are shared between all ranks using the same
 * call site (file:line), via a POSIX shared-memory segment. Falls back to a
 * regular malloc when smpi/use-shared-malloc is disabled or size is 0.
 * NOTE(review): several lines are elided from this view — the declarations
 * of `mem' and `fd', the branch condition distinguishing first allocation
 * from reuse (the visible "insertion did not take place" comment appears to
 * belong to the reuse branch; confirm against upstream), error-path details
 * and the final `return mem;'. Also note the visible snprintf uses 31 rather
 * than sizeof(shmname)==32 — presumably intentional truncation headroom, but
 * worth confirming. */
void *smpi_shared_malloc(size_t size, const char *file, int line)
  if (size > 0 && xbt_cfg_get_boolean("smpi/use-shared-malloc")){
    smpi_source_location loc(file, line);
    auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
    auto data = res.first;   // iterator to the (possibly fresh) allocs entry
    // The insertion did not take place.
    // Generate a shared memory name from the address of the shared_data:
    char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
    snprintf(shmname, 31, "/shmalloc%p", &*data);
    fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
      xbt_die("Please cleanup /dev/shm/%s", shmname);
      xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
    data->second.fd = fd;
    data->second.count = 1;
    mem = shm_map(fd, size, &*data);
    // Early unlink: the name disappears but the mapping stays valid until unmapped.
    if (shm_unlink(shmname) < 0) {
      XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
    XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
    // Reuse branch: map the already-created segment and bump its refcount.
    mem = shm_map(data->second.fd, size, &*data);
    data->second.count++;
    XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
    mem = xbt_malloc(size);
    XBT_DEBUG("Classic malloc %zu in %p", size, mem);
/** Free a pointer returned by smpi_shared_malloc: unmap this process's view
 * and, when the per-site refcount drops to zero, drop the allocs entry.
 * NOTE(review): braces, early `return's, the decrement of `data->count', the
 * `} else {' separating shared and classic paths and the xbt_free/`}' tail
 * are elided from this view. */
void smpi_shared_free(void *ptr)
  char loc[PTR_STRLEN];

  if (xbt_cfg_get_boolean("smpi/use-shared-malloc")){
    snprintf(loc, PTR_STRLEN, "%p", ptr);
    auto meta = allocs_metadata.find(ptr);
    if (meta == allocs_metadata.end()) {
      XBT_WARN("Cannot free: %p was not shared-allocated by SMPI - maybe its size was 0?", ptr);
    shared_data_t* data = &meta->second.data->second;  // per-site refcounted record
    if (munmap(ptr, meta->second.size) < 0) {
      XBT_WARN("Unmapping of fd %d failed: %s", data->fd, strerror(errno));
    // NOTE(review): the `data->count--;' presumably sits on an elided line.
    if (data->count <= 0) {
      allocs.erase(allocs.find(meta->second.data->first));
      XBT_DEBUG("Shared free - with removal - of %p", ptr);
      XBT_DEBUG("Shared free - no removal - of %p, count = %d", ptr, data->count);
    XBT_DEBUG("Classic free of %p", ptr);
/** Return whether a (func, input) pair was already registered through
 * smpi_shared_set_call.
 * NOTE(review): the TRY/CATCH structure around xbt_dict_get, the boolean
 * result handling, free(loc) and `return' lines are elided from this view. */
int smpi_shared_known_call(const char* func, const char* input)
  char* loc = bprintf("%s:%s", func, input);

  if (calls==nullptr) {
    calls = xbt_dict_new_homogeneous(nullptr);
  xbt_dict_get(calls, loc); /* Succeed or throw */
  // Only a not-found error means "unknown call"; anything else propagates.
  if (ex.category != not_found_error)
/** Fetch the data registered for a (func, input) pair; throws (via
 * xbt_dict_get) when the pair is unknown.
 * NOTE(review): the `if (calls==nullptr)' condition guarding the dict
 * creation, free(loc) and `return data;' lines are elided from this view. */
void* smpi_shared_get_call(const char* func, const char* input) {
  char* loc = bprintf("%s:%s", func, input);

  calls = xbt_dict_new_homogeneous(nullptr);
  void* data = xbt_dict_get(calls, loc);
/** Register `data' for a (func, input) pair so other ranks can reuse the
 * result of an idempotent call instead of recomputing it.
 * NOTE(review): the `if (calls==nullptr)' condition guarding the dict
 * creation, free(loc) and `return data;' lines are elided from this view. */
void* smpi_shared_set_call(const char* func, const char* input, void* data) {
  char* loc = bprintf("%s:%s", func, input);

  calls = xbt_dict_new_homogeneous(nullptr);
  xbt_dict_set(calls, loc, data, nullptr);
/** Map a given SMPI privatization segment (make a SMPI process active) */
/* Fast path: skip the remap when the requested rank's segment is already the
 * one currently mapped.
 * NOTE(review): the `return;', braces and possible privatization guard
 * between the check and the real switch are elided from this view. */
void smpi_switch_data_segment(int dest) {
  if (smpi_loaded_page == dest)//no need to switch, we've already loaded the one we want
  smpi_really_switch_data_segment(dest);
/** Map a given SMPI privatization segment (make a SMPI process active) even if SMPI thinks it is already active
 *
 * When doing a state restoration, the state of the restored variables might not be consistent with the state of the
 * virtual memory. In this case, we to change the data segment.
 * NOTE(review): braces, an early `return', and the #endif closing the
 * HAVE_PRIVATIZATION section are elided from this view. */
void smpi_really_switch_data_segment(int dest)
  if(smpi_size_data_exe == 0)//no need to switch

#if HAVE_PRIVATIZATION
  if(smpi_loaded_page==-1){//initial switch, do the copy from the real page here
    // Seed every rank's private region with the executable's original data+bss.
    for (int i=0; i< smpi_process_count(); i++){
      memcpy(smpi_privatisation_regions[i].address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);

  // FIXME, cross-process support (mmap across process when necessary)
  int current = smpi_privatisation_regions[dest].file_descriptor;
  XBT_DEBUG("Switching data frame to the one of process %d", dest);
  // MAP_FIXED: overlay the executable's data+bss with `dest's private copy.
  void* tmp = mmap (TOPAGE(smpi_start_data_exe), smpi_size_data_exe,
                    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, current, 0);
  if (tmp != TOPAGE(smpi_start_data_exe))
    xbt_die("Couldn't map the new region");
  smpi_loaded_page = dest;
/** Tell whether `file' is one of SMPI's privatization buffer files, i.e.
 * whether its path starts with "/dev/shm/my-buffer-".
 *
 * @param file  NUL-terminated path to test (not modified).
 * @return 1 when the path carries the privatization prefix, 0 otherwise.
 */
int smpi_is_privatisation_file(char* file)
{
  // Use the compile-time prefix length instead of recomputing strlen() of a
  // constant literal on every call.
  static const char prefix[] = "/dev/shm/my-buffer-";
  return strncmp(prefix, file, sizeof(prefix) - 1) == 0;
}
/** Create one shared-memory copy of the executable's data+bss segment per
 * simulated process, so each rank gets private global variables.
 * NOTE(review): numerous lines are elided from this view — braces, #else of
 * the privatization guard, early returns, the declarations of `status',
 * `file_descriptor' and `path', the EMFILE errno test selecting between the
 * two xbt_die messages, several status checks, and the closing #endif. */
void smpi_initialize_global_memory_segments()
#if !HAVE_PRIVATIZATION
  // Privatization requested on an unsupported platform: refuse loudly.
  smpi_privatize_global_variables=false;
  xbt_die("You are trying to use privatization on a system that does not support it. Don't.");

  smpi_get_executable_global_size();   // fills smpi_start_data_exe / smpi_size_data_exe

  XBT_DEBUG ("bss+data segment found : size %d starting at %p", smpi_size_data_exe, smpi_start_data_exe );

  if (smpi_size_data_exe == 0){//no need to switch
    smpi_privatize_global_variables=false;

  smpi_privatisation_regions =
      static_cast<smpi_privatisation_region_t>( xbt_malloc(smpi_process_count() * sizeof(struct s_smpi_privatisation_region)));

  for (int i=0; i< smpi_process_count(); i++){
    //create SIMIX_process_count() mappings of this size with the same data inside
    void *address = nullptr;
    // Retry with fresh random names until shm_open does not collide (EEXIST).
    snprintf(path, sizeof(path), "/smpi-buffer-%06x", rand()%0xffffff);
    file_descriptor = shm_open(path, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR);
    } while (file_descriptor == -1 && errno == EEXIST);
    if (file_descriptor < 0) {
      // EMFILE branch (condition elided): explain how to raise fd limits.
      xbt_die("Impossible to create temporary file for memory mapping: %s\n\
The open() system call failed with the EMFILE error code (too many files). \n\n\
This means that you reached the system limits concerning the amount of files per process. \
This is not a surprise if you are trying to virtualize many processes on top of SMPI. \
Don't panic -- you should simply increase your system limits and try again. \n\n\
First, check what your limits are:\n\
  cat /proc/sys/fs/file-max # Gives you the system-wide limit\n\
  ulimit -Hn                # Gives you the per process hard limit\n\
  ulimit -Sn                # Gives you the per process soft limit\n\
  cat /proc/self/limits     # Displays any per-process limitation (including the one given above)\n\n\
If one of these values is less than the amount of MPI processes that you try to run, then you got the explanation of this error. \
Ask the Internet about tutorials on how to increase the files limit such as: https://rtcamp.com/tutorials/linux/increase-open-files-limit/",
      xbt_die("Impossible to create temporary file for memory mapping: %s",

    // Size the backing file to hold one full copy of data+bss.
    status = ftruncate(file_descriptor, smpi_size_data_exe);
      xbt_die("Impossible to set the size of the temporary file for memory mapping");

    /* Ask for a free region */
    address = mmap (nullptr, smpi_size_data_exe, PROT_READ | PROT_WRITE, MAP_SHARED, file_descriptor, 0);
    if (address == MAP_FAILED)
      xbt_die("Couldn't find a free region for memory mapping");

    // The name can go away now; the fd keeps the segment alive.
    status = shm_unlink(path);
      xbt_die("Impossible to unlink temporary file for memory mapping");

    //initialize the values
    memcpy(address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);

    //store the address of the mapping for further switches
    smpi_privatisation_regions[i].file_descriptor = file_descriptor;
    smpi_privatisation_regions[i].address = address;
/** Tear down the per-process privatization regions: unmap each copy, close
 * its fd, and free the region table.
 * NOTE(review): the early `return;', closing braces and the #endif of the
 * HAVE_PRIVATIZATION guard are elided from this view. */
void smpi_destroy_global_memory_segments(){
  if (smpi_size_data_exe == 0)//no need to switch
#if HAVE_PRIVATIZATION
  for (int i=0; i< smpi_process_count(); i++) {
    if (munmap(smpi_privatisation_regions[i].address, smpi_size_data_exe) < 0)
      XBT_WARN("Unmapping of fd %d failed: %s", smpi_privatisation_regions[i].file_descriptor, strerror(errno));
    close(smpi_privatisation_regions[i].file_descriptor);
  xbt_free(smpi_privatisation_regions);
810 extern "C" { /** These functions will be called from the user code **/
811 smpi_trace_call_location_t* smpi_trace_get_call_location() {
812 return smpi_process_get_call_location();
815 void smpi_trace_set_call_location(const char* file, const int line) {
816 smpi_trace_call_location_t* loc = smpi_process_get_call_location();
818 loc->previous_filename = loc->filename;
819 loc->previous_linenumber = loc->linenumber;
820 loc->filename = file;
821 loc->linenumber = line;
825 * Required for Fortran bindings
827 void smpi_trace_set_call_location_(const char* file, int* line) {
828 smpi_trace_set_call_location(file, *line);
832 * Required for Fortran if -fsecond-underscore is activated
834 void smpi_trace_set_call_location__(const char* file, int* line) {
835 smpi_trace_set_call_location(file, *line);