Algorithmique Numérique Distribuée (Public GIT Repository)
commit: please sonar
[simgrid.git] / src / smpi / internals / smpi_deployment.cpp
diff --git a/src/smpi/internals/smpi_deployment.cpp b/src/smpi/internals/smpi_deployment.cpp
index 407fe67..76052cf 100644
--- a/src/smpi/internals/smpi_deployment.cpp
+++ b/src/smpi/internals/smpi_deployment.cpp
@@ -7,6 +7,7 @@
 #include "SmpiHost.hpp"
 #include "private.hpp"
 #include "simgrid/msg.h" /* barrier */
+#include "simgrid/s4u/Engine.hpp"
 #include "smpi_comm.hpp"
 #include <map>
@@ -20,31 +21,24 @@ public:
       : name(name)
       , size(max_no_processes)
       , present_processes(0)
-      , index(process_count)
       , comm_world(comm)
       , finalization_barrier(finalization_barrier)
-  {
-  }
+  { }
 
   const char* name;
   int size;
   int present_processes;
-  int index; // Badly named. This should be "no_processes_when_registering" ;)
   MPI_Comm comm_world;
   msg_bar_t finalization_barrier;
 };
 }
 }
-namespace s4u {
-extern std::map<std::string, simgrid::s4u::Host*> host_list;
-}
 }
 
 using simgrid::smpi::app::Instance;
 
 static std::map<std::string, Instance> smpi_instances;
 extern int process_count; // How many processes have been allocated over all instances?
-extern int* index_to_process_data;
 
 /** \ingroup smpi_simulation
  * \brief Registers a running instance of a MPI program.
@@ -56,44 +50,41 @@ extern int* index_to_process_data;
  */
 void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_processes)
 {
-  SIMIX_function_register(name, code);
+  if (code != nullptr) { // When started with smpirun, we will not execute a function
+    SIMIX_function_register(name, code);
+  }
 
   static int already_called = 0;
   if (not already_called) {
     already_called = 1;
-    for (auto const& item : simgrid::s4u::host_list) {
-      simgrid::s4u::Host* host = item.second;
+    std::vector<simgrid::s4u::Host*> list;
+    simgrid::s4u::Engine::getInstance()->getHostList(&list);
+    for (auto const& host : list) {
       host->extension_set(new simgrid::smpi::SmpiHost(host));
     }
   }
 
   Instance instance(name, num_processes, process_count, MPI_COMM_NULL, MSG_barrier_init(num_processes));
+  MPI_Group group     = new simgrid::smpi::Group(instance.size);
+  instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
+  MPI_Attr_put(instance.comm_world, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
 
   process_count+=num_processes;
   smpi_instances.insert(std::pair<std::string, Instance>(name, instance));
 }
 
-//get the index of the process in the process_data array
-void smpi_deployment_register_process(const char* instance_id, int rank, int index)
+void smpi_deployment_register_process(const char* instance_id, int rank, simgrid::s4u::ActorPtr actor)
 {
-  if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
-    index_to_process_data[index]=index;
+  if (smpi_instances.empty()) // no instance registered, we probably used smpirun.
     return;
-  }
 
   Instance& instance = smpi_instances.at(instance_id);
-  if (instance.comm_world == MPI_COMM_NULL) {
-    MPI_Group group     = new simgrid::smpi::Group(instance.size);
-    instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
-  }
   instance.present_processes++;
-  index_to_process_data[index] = instance.index + rank;
-  instance.comm_world->group()->set_mapping(index, rank);
+  instance.comm_world->group()->set_mapping(actor, rank);
 }
 
-//get the index of the process in the process_data array
 MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
 {
   if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
@@ -115,9 +106,8 @@ msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id)
 void smpi_deployment_cleanup_instances(){
   for (auto const& item : smpi_instances) {
     Instance instance = item.second;
-    if (instance.comm_world != MPI_COMM_NULL)
-      delete instance.comm_world->group();
-    delete instance.comm_world;
     MSG_barrier_destroy(instance.finalization_barrier);
+    simgrid::smpi::Comm::destroy(instance.comm_world);
   }
+  smpi_instances.clear();
 }
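
Note: SMPI_app_instance_register(), which this commit teaches to tolerate a null code pointer (the smpirun case), is the public entry point for declaring an MPI instance inside a larger MSG simulation. Below is a minimal sketch of a caller, loosely modeled on SimGrid's smpi_msg_masterslave example; the function alice(), the instance name, and the file arguments are illustrative placeholders, and the exact call sequence may differ between SimGrid versions:

  #include <stdio.h>
  #include <simgrid/msg.h>
  #include <smpi/smpi.h>

  /* Hypothetical MPI code executed by each rank of the "alice" instance. */
  static int alice(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("alice: rank %d is alive\n", rank);
    MPI_Finalize();
    return 0;
  }

  int main(int argc, char* argv[])
  {
    MSG_init(&argc, argv);
    MSG_create_environment(argv[1]);               /* platform description file */
    SMPI_app_instance_register("alice", alice, 2); /* declare an instance of 2 ranks */
    MSG_launch_application(argv[2]);               /* deployment placing the ranks on hosts */
    SMPI_init();
    msg_error_t res = MSG_main();
    SMPI_finalize();
    return res != MSG_OK;
  }

With the new null-pointer guard, smpirun-driven runs can reuse the same registration path without registering any function, since the ranks' code comes from the deployment file instead.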