+/**
+ * @brief Creates a temporary POSIX shared-memory object with shm_open() and returns its file descriptor.
+ */
+int smpi_temp_shm_get()
+{
+  constexpr unsigned VAL_MASK = 0xffffffffUL;
+  static unsigned prev_val    = VAL_MASK;
+  char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on macOS (shm_open raises ENAMETOOLONG otherwise)
+  int fd = -1; // defensively initialized; always set by shm_open() in the loop below
+
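+  // Cycle through candidate names, starting right after the last value that was
+  // used, so that successive calls avoid retrying names that are already taken.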
+  for (unsigned i = (prev_val + 1) & VAL_MASK; i != prev_val; i = (i + 1) & VAL_MASK) {
+    snprintf(shmname, sizeof(shmname), "/smpi-buffer-%016x", i);
+    fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+    if (fd != -1 || errno != EEXIST) { // success, or a failure other than a name collision: stop searching
+      prev_val = i;
+      break;
+    }
+  }
+  if (fd < 0) {
+    if (errno == EMFILE) {
+      xbt_die("Impossible to create a temporary file for memory mapping: %s\n\
+The shm_open() system call failed with the EMFILE error code (too many open files).\n\n\
+This means that you have reached the system limit on the number of open files per process. \
+This is not a surprise if you are trying to virtualize many processes on top of SMPI. \
+Don't panic -- you should simply increase your system limits and try again.\n\n\
+First, check what your limits are:\n\
+  cat /proc/sys/fs/file-max   # Gives you the system-wide limit\n\
+  ulimit -Hn                  # Gives you the per-process hard limit\n\
+  ulimit -Sn                  # Gives you the per-process soft limit\n\
+  cat /proc/self/limits       # Displays any per-process limitation (including the ones given above)\n\n\
+If one of these values is smaller than the number of MPI processes that you are trying to run, that explains this error. \
+Search the Internet for tutorials on how to increase the open-files limit, such as: https://rtcamp.com/tutorials/linux/increase-open-files-limit/",
+              strerror(errno));
+    }
+    xbt_die("Impossible to create a temporary file for memory mapping. shm_open: %s", strerror(errno));
+  }
+  XBT_DEBUG("Got temporary shm %s (fd = %d)", shmname, fd);
+  if (shm_unlink(shmname) < 0)
+    XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
+  return fd;
+}
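+
+/* A minimal usage sketch (illustrative only; everything below other than
+ * smpi_temp_shm_get() is an assumption, not part of this change). Since the
+ * returned descriptor refers to an already-unlinked object of size zero, a
+ * caller would typically size it with ftruncate() and map it with mmap():
+ *
+ *   size_t size = 1024 * 1024;
+ *   int fd      = smpi_temp_shm_get();
+ *   if (ftruncate(fd, static_cast<off_t>(size)) < 0)
+ *     xbt_die("Could not resize the temporary shm: %s", strerror(errno));
+ *   void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *   if (mem == MAP_FAILED)
+ *     xbt_die("Could not map the temporary shm: %s", strerror(errno));
+ */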