XBT_PUBLIC_DATA( MPI_Group ) MPI_GROUP_EMPTY;
-typedef SMPI_Comm *MPI_Comm;
+typedef SMPI_Comm* MPI_Comm;
#define MPI_COMM_NULL ((MPI_Comm)NULL)
XBT_PUBLIC_DATA( MPI_Comm ) MPI_COMM_WORLD;
#define MPI_COMM_SELF smpi_process_comm_self()
-typedef SMPI_Request *MPI_Request;
+typedef SMPI_Request* MPI_Request;
#define MPIO_Request MPI_Request
#define MPI_REQUEST_NULL ((MPI_Request)NULL)
void set_privatized_region(smpi_privatization_region_t region);
smpi_privatization_region_t privatized_region();
int index();
- MPI_Comm comm_world();
smx_mailbox_t mailbox();
smx_mailbox_t mailbox_small();
xbt_mutex_t mailboxes_mutex();
xbt_os_timer_t timer();
void simulated_start();
double simulated_elapsed();
+ MPI_Comm comm_world();
MPI_Comm comm_self();
MPI_Comm comm_intra();
void set_comm_intra(MPI_Comm comm);
*/
void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_processes)
{
- SIMIX_function_register(name, code);
+ if (code != nullptr) { // When started with smpirun, we will not execute a function
+ SIMIX_function_register(name, code);
+ }
static int already_called = 0;
if (not already_called) {
}
Instance instance(name, num_processes, process_count, MPI_COMM_NULL, MSG_barrier_init(num_processes));
+ MPI_Group group = new simgrid::smpi::Group(instance.size);
+ instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
+ MPI_Attr_put(instance.comm_world, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
process_count+=num_processes;
Instance& instance = smpi_instances.at(instance_id);
- if (instance.comm_world == MPI_COMM_NULL) {
- MPI_Group group = new simgrid::smpi::Group(instance.size);
- instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
- }
instance.present_processes++;
index_to_process_data[index] = instance.index + rank;
instance.comm_world->group()->set_mapping(index, rank);
void smpi_deployment_cleanup_instances(){
for (auto const& item : smpi_instances) {
Instance instance = item.second;
- if (instance.comm_world != MPI_COMM_NULL)
- delete instance.comm_world->group();
- delete instance.comm_world;
MSG_barrier_destroy(instance.finalization_barrier);
+ simgrid::smpi::Comm::destroy(instance.comm_world);
}
+ smpi_instances.clear();
}
int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
xbt_os_timer_t global_timer;
+/**
+ * Setting MPI_COMM_WORLD to MPI_COMM_UNINITIALIZED (it's a variable)
+ * is important because the implementation of MPI_Comm checks whether
+ * "this == MPI_COMM_UNINITIALIZED" holds; if so, it uses smpi_process()->comm_world()
+ * instead of "this".
+ * This is basically how we only have one global variable but all processes have
+ * different communicators (basically, the one their SMPI instance uses).
+ *
+ * See smpi_comm.cpp and the functions therein for details.
+ */
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+// Name of the default instance, used when no instance gets manually created.
+// Check also the smpirun.in script: this default name is used there as well (when the <actor> tag is generated).
+static const char* smpi_default_instance_name = "smpirun";
static simgrid::config::Flag<double> smpi_wtime_sleep(
"smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
static simgrid::config::Flag<double> smpi_init_sleep(
}
#endif
- int smpirun = 0;
- msg_bar_t finalization_barrier = nullptr;
- if (process_count == 0){
- process_count = SIMIX_process_count();
- smpirun=1;
- finalization_barrier = MSG_barrier_init(process_count);
+ if (index_to_process_data == nullptr) {
+ index_to_process_data = new int[SIMIX_process_count()];
+ }
+
+ bool smpirun = false;
+ if (process_count == 0) { // The program has been dispatched but no other
+ // SMPI instances have been registered. We're using smpirun.
+ smpirun = true;
+ SMPI_app_instance_register(smpi_default_instance_name, nullptr,
+ SIMIX_process_count()); // This call has a side effect on process_count...
+ MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
}
smpi_universe_size = process_count;
process_data = new simgrid::smpi::Process*[process_count];
for (int i = 0; i < process_count; i++) {
- process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
- }
- //if the process was launched through smpirun script we generate a global mpi_comm_world
- //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
- MPI_Group group;
- if (smpirun) {
- group = new simgrid::smpi::Group(process_count);
- MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
- MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
-
- for (int i = 0; i < process_count; i++)
- group->set_mapping(i, i);
+ if (smpirun) {
+ process_data[i] = new simgrid::smpi::Process(i, smpi_deployment_finalization_barrier(smpi_default_instance_name));
+ smpi_deployment_register_process(smpi_default_instance_name, i, i);
+ } else {
+ // TODO We can pass a nullptr here because Process::set_data() assigns the
+ // barrier from the instance anyway. This is ugly and should be changed
+ process_data[i] = new simgrid::smpi::Process(i, nullptr);
+ }
}
}
{
smpi_bench_destroy();
smpi_shared_destroy();
- if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
- delete MPI_COMM_WORLD->group();
- MSG_barrier_destroy(process_data[0]->finalization_barrier());
- }else{
- smpi_deployment_cleanup_instances();
- }
+ smpi_deployment_cleanup_instances();
for (int i = 0, count = smpi_process_count(); i < count; i++) {
if(process_data[i]->comm_self()!=MPI_COMM_NULL){
simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
delete[] process_data;
process_data = nullptr;
- if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
- MPI_COMM_WORLD->cleanup_smp();
- MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
- if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
- simgrid::smpi::Colls::smpi_coll_cleanup_callback();
- delete MPI_COMM_WORLD;
- }
+ if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback();
MPI_COMM_WORLD = MPI_COMM_NULL;
void Process::set_data(int index, int* argc, char*** argv)
{
char* instance_id = (*argv)[1];
- comm_world_ = smpi_deployment_comm_world(instance_id);
- msg_bar_t bar = smpi_deployment_finalization_barrier(instance_id);
- if (bar!=nullptr) // don't overwrite the default one
+ comm_world_ = smpi_deployment_comm_world(instance_id);
+ msg_bar_t bar = smpi_deployment_finalization_barrier(instance_id);
+ if (bar != nullptr) // don't overwrite the current one if the instance has none
finalization_barrier_ = bar;
instance_id_ = instance_id;
- index_ = index;
+ index_ = index;
static_cast<simgrid::msg::ActorExt*>(SIMIX_process_self()->userdata)->data = this;
fi
echo " <actor host=\"${host}\" function=\"$i\"> <!-- function name used only for logging -->
- <argument value=\"1\"/> <!-- instance -->
+ <argument value=\"smpirun\"/> <!-- instance -->
<argument value=\"$i\"/> <!-- rank -->" >> ${APPLICATIONTMP}
if [ ${REPLAY} = 1 ]; then
if [ ${NUMTRACES} -gt 1 ]; then
return -1;
}
}
+}
+
+namespace {
/* Turn something like "1-4,6,9-11" into the vector {1,2,3,4,6,9,10,11} */
-static std::vector<int>* explodesRadical(std::string radicals)
+std::vector<int>* explodesRadical(std::string radicals)
{
std::vector<int>* exploded = new std::vector<int>();
return exploded;
}
-namespace {
class unit_scale : public std::unordered_map<std::string, double> {
public:
using std::unordered_map<std::string, double>::unordered_map;
}
}
}
-}
/* Note: field `unit' for the last element of parameter `units' should be nullptr. */
-static double surf_parse_get_value_with_unit(const char* string, const unit_scale& units, const char* entity_kind,
- std::string name, const char* error_msg, const char* default_unit)
+double surf_parse_get_value_with_unit(const char* string, const unit_scale& units, const char* entity_kind,
+ std::string name, const char* error_msg, const char* default_unit)
{
char* ptr;
errno = 0;
surf_parse_error(std::string("unknown unit: ") + ptr);
return res * u->second;
}
+}
+
+extern "C" {
double surf_parse_get_time(const char* string, const char* entity_kind, std::string name)
{