Algorithmique Numérique Distribuée
Public GIT Repository
boss not happy, partial revert
[simgrid.git] / src / smpi / mpi / smpi_comm.cpp
diff --git a/src/smpi/mpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp
index b03c340..e8e893f 100644 (file)
--- a/src/smpi/mpi/smpi_comm.cpp
+++ b/src/smpi/mpi/smpi_comm.cpp
@@ -39,11 +39,12 @@ Comm::Comm(MPI_Group group, MPI_Topology topo, int smp, int in_id) : group_(grou
   leaders_map_ = nullptr;
   is_blocked_ = 0;
   info_ = MPI_INFO_NULL;
-  static int global_id_=0;
+  errhandler_ = MPI_ERRORS_ARE_FATAL;
   //First creation of comm is done before SIMIX_run, so only do comms for others
   if(in_id==MPI_UNDEFINED && smp==0 && this->rank()!=MPI_UNDEFINED ){
     int id;
     if(this->rank()==0){
+      static int global_id_ = 0;
       id=global_id_;
       global_id_++;
     }
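Note on this hunk: the global communicator-id counter moves from constructor scope into the rank-0 branch, the only place that reads or writes it, and the freed slot now initializes the new errhandler_ field to MPI_ERRORS_ARE_FATAL (MPI's default handler). A function-local static is initialized once, the first time control reaches its declaration, and keeps its value across calls. A minimal standalone sketch of that pattern (names hypothetical, not SimGrid API):

#include <cstdio>

// Hypothetical sketch: only the root caller owns the static counter,
// mirroring how rank 0 hands out fresh communicator ids in the hunk above.
static int next_id(bool is_root)
{
  if (is_root) {
    static int global_id = 0; // initialized once, on first execution
    return global_id++;
  }
  return -1; // non-roots receive the id from the root (e.g. via a bcast)
}

int main()
{
  std::printf("%d\n", next_id(true));  // 0
  std::printf("%d\n", next_id(false)); // -1
  std::printf("%d\n", next_id(true));  // 1
  return 0;
}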
@@ -106,6 +107,8 @@ int Comm::dup(MPI_Comm* newcomm){
   //duplicate info if present
   if(info_!=MPI_INFO_NULL)
     (*newcomm)->info_ = new simgrid::smpi::Info(info_);
+  //duplicate errhandler
+  (*newcomm)->set_errhandler(errhandler_);
   return ret;
 }
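This hunk makes Comm::dup copy the error handler to the duplicate alongside the info object, matching MPI's rule that a newly created communicator inherits the error handler of its parent. A hedged user-level illustration using the plain MPI C API (not SimGrid internals):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  // Ask for error codes instead of aborting the whole run.
  MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

  // After this commit, the duplicate inherits MPI_ERRORS_RETURN too.
  MPI_Comm dup;
  MPI_Comm_dup(MPI_COMM_WORLD, &dup);

  // An invalid destination rank now yields an error code on the
  // duplicated communicator instead of terminating the application.
  int rc = MPI_Send(nullptr, 0, MPI_INT, -42, 0, dup);
  if (rc != MPI_SUCCESS)
    std::printf("got error code %d as expected\n", rc);

  MPI_Comm_free(&dup);
  MPI_Finalize();
  return 0;
}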
@@ -272,7 +275,7 @@ MPI_Comm Comm::split(int color, int key)
       group_root = group_out; /* Save root's group */
     }
     for (unsigned j = 0; j < rankmap.size(); j++) {
-      s4u::ActorPtr actor = group->actor(rankmap[j].second);
+      s4u::Actor* actor = group->actor(rankmap[j].second);
       group_out->set_mapping(actor, j);
     }
     MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
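Here group->actor() now hands back a raw s4u::Actor* instead of the reference-counted s4u::ActorPtr, so a transient lookup no longer touches the refcount. A standalone sketch of the owning-handle versus borrowed-pointer distinction (types are hypothetical stand-ins; SimGrid's real ActorPtr is an intrusive smart pointer):

#include <cstdio>

struct Actor {
  int refcount = 1;
  void ref() { ++refcount; }
  void unref() { if (--refcount == 0) delete this; }
};

struct ActorPtr {                 // owning, refcounted handle
  Actor* p;
  explicit ActorPtr(Actor* a) : p(a) { p->ref(); }
  ~ActorPtr() { p->unref(); }
  Actor* get() const { return p; } // borrow without touching the refcount
};

int main()
{
  Actor* a = new Actor;                  // refcount == 1
  {
    ActorPtr owner(a);                   // refcount == 2
    Actor* borrowed = owner.get();       // still 2: borrowing is free
    std::printf("refcount while borrowed: %d\n", borrowed->refcount);
  }                                      // owner released: back to 1
  a->unref();                            // last reference gone, Actor deleted
  return 0;
}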
@@ -337,12 +340,12 @@ void Comm::unref(Comm* comm){
 MPI_Comm Comm::find_intra_comm(int * leader){
   //get the indices of all processes sharing the same simix host
-  auto& process_list = sg_host_self()->pimpl_->process_list_;
+  auto& actor_list = sg_host_self()->pimpl_->actor_list_;
   int intra_comm_size = 0;
   int min_index = INT_MAX; // the minimum index will be the leader
-  for (auto& actor : process_list) {
+  for (auto& actor : actor_list) {
     int index = actor.get_pid();
-    if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
+    if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
       intra_comm_size++;
       if (index < min_index)
         min_index = index;
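Besides the process_list_/actor_list_ and iface()/ciface() renames, this hunk shows the leader-election rule of find_intra_comm: among the actors of the current group deployed on the local host, the smallest pid wins. A self-contained sketch of that rule (hypothetical types, not the real host-local actor list):

#include <climits>
#include <cstdio>
#include <vector>

struct LocalActor { int pid; bool in_group; };

// Count the group members on this host and return the smallest pid,
// mirroring the "minimum index will be the leader" comment above.
static int elect_leader(const std::vector<LocalActor>& actors, int* size)
{
  int min_pid = INT_MAX;
  *size = 0;
  for (const auto& a : actors) {
    if (!a.in_group)   // skip actors outside the current communicator
      continue;
    ++*size;
    if (a.pid < min_pid)
      min_pid = a.pid;
  }
  return min_pid;
}

int main()
{
  std::vector<LocalActor> actors = {{7, true}, {3, false}, {5, true}};
  int size   = 0;
  int leader = elect_leader(actors, &size);
  std::printf("leader pid %d, intra size %d\n", leader, size); // 5, 2
  return 0;
}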
@@ -351,9 +354,9 @@ MPI_Comm Comm::find_intra_comm(int * leader){
   XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
   MPI_Group group_intra = new Group(intra_comm_size);
   int i = 0;
-  for (auto& actor : process_list) {
-    if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) {
-      group_intra->set_mapping(actor.iface(), i);
+  for (auto& actor : actor_list) {
+    if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) {
+      group_intra->set_mapping(actor.ciface(), i);
       i++;
     }
   }
@@ -381,7 +384,7 @@ void Comm::init_smp(){
     // we need to switch as the called function may silently touch global variables
     smpi_switch_data_segment(s4u::Actor::self());
   }
-  //identify neighbours in comm
+  //identify neighbors in comm
   MPI_Comm comm_intra = find_intra_comm(&leader);
@@ -423,7 +426,7 @@ void Comm::init_smp(){
   if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
     //create leader_communicator
     for (i=0; i< leader_group_size;i++)
-      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
+      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
     leader_comm = new Comm(leaders_group, nullptr,1);
     this->set_leaders_comm(leader_comm);
     this->set_intra_comm(comm_intra);
@@ -431,7 +434,7 @@ void Comm::init_smp(){
     // create intracommunicator
   }else{
     for (i=0; i< leader_group_size;i++)
-      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
+      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
     if(this->get_leaders_comm()==MPI_COMM_NULL){
       leader_comm = new Comm(leaders_group, nullptr,1);
@@ -547,6 +550,16 @@ void Comm::set_info(MPI_Info info){
   info_=info;
 }
+MPI_Errhandler Comm::errhandler(){
+  return errhandler_;
+}
+
+void Comm::set_errhandler(MPI_Errhandler errhandler){
+  errhandler_=errhandler;
+  if(errhandler_!= MPI_ERRHANDLER_NULL)
+    errhandler->ref();
+}
+
 MPI_Comm Comm::split_type(int type, int /*key*/, MPI_Info)
 {
   //MPI_UNDEFINED can be given to some nodes... but we need them to still perform the smp part which is collective
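The final hunk adds the accessor pair backing the dup change above: set_errhandler stores the handler and takes a reference on it when it is not MPI_ERRHANDLER_NULL. A standalone sketch of that refcounting discipline (hypothetical types; the matching release site is an assumption, since this hunk only shows the ref taken by the setter):

#include <cstdio>

struct Errhandler {
  int refcount = 1;
  void ref() { ++refcount; }
  void unref() { if (--refcount == 0) delete this; }
};

struct Comm {
  Errhandler* errhandler_ = nullptr;

  Errhandler* errhandler() { return errhandler_; }

  // Mirrors the setter above: keep the handler alive while we hold it.
  void set_errhandler(Errhandler* errhandler)
  {
    errhandler_ = errhandler;
    if (errhandler_ != nullptr) // stands in for MPI_ERRHANDLER_NULL
      errhandler_->ref();
  }

  ~Comm()
  {
    if (errhandler_ != nullptr) // assumed matching release on destruction
      errhandler_->unref();
  }
};

int main()
{
  auto* handler = new Errhandler;   // refcount == 1
  {
    Comm comm;
    comm.set_errhandler(handler);   // refcount == 2
    std::printf("refcount after set: %d\n", handler->refcount);
  }                                 // comm destroyed: refcount == 1
  handler->unref();                 // drop our own reference, handler freed
  return 0;
}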