-/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2018. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_request.hpp"
#include "smpi_status.hpp"
#include "smpi_win.hpp"
+#include "src/simix/smx_host_private.hpp"
#include "src/simix/smx_private.hpp"
+
#include <algorithm>
#include <climits>
#include <vector>
simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
+using simgrid::s4u::ActorPtr;
+
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order to
 * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by a union. */
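+/* Hypothetical sketch of that change (not part of this patch; class names assumed):
+ *   the SMPI_Topo_type field would discriminate a union such as
+ *   union { Topo_Cart* cart; Topo_Graph* graph; Topo_Dist_Graph* dist_graph; };
+ */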
Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
{
- refcount_=1;
- topoType_ = MPI_INVALID_TOPO;
- intra_comm_ = MPI_COMM_NULL;
- leaders_comm_ = MPI_COMM_NULL;
- is_uniform_=1;
+ refcount_ = 1;
+ topoType_ = MPI_INVALID_TOPO;
+ intra_comm_ = MPI_COMM_NULL;
+ leaders_comm_ = MPI_COMM_NULL;
+ is_uniform_ = 1;
non_uniform_map_ = nullptr;
- leaders_map_ = nullptr;
- is_blocked_=0;
+ leaders_map_ = nullptr;
+ is_blocked_ = 0;
}
void Comm::destroy(Comm* comm)
}
int Comm::dup(MPI_Comm* newcomm){
- if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
MPI_Group cp = new Group(this->group());
- (*newcomm) = new Comm(cp, this->topo());
- int ret = MPI_SUCCESS;
+ (*newcomm) = new Comm(cp, this->topo());
+ int ret = MPI_SUCCESS;
if (not attributes()->empty()) {
int flag;
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(smpi_process()->index());
+ return group_->rank(simgrid::s4u::Actor::self());
}
void Comm::get_name (char* name, int* len)
return;
}
if(this == MPI_COMM_WORLD) {
- strncpy(name, "WORLD",5);
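+ // copy strlen("WORLD") + 1 = 6 bytes, so the terminating NUL is included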
+ strncpy(name, "WORLD", 6);
*len = 5;
} else {
*len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- int index = group->index(rankmap[j].second);
- group_out->set_mapping(index, j);
+ ActorPtr actor = group->actor(rankmap[j].second);
+ group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
int reqs = 0;
Comm::unref(intra_comm_);
if (leaders_comm_ != MPI_COMM_NULL)
Comm::unref(leaders_comm_);
- if (non_uniform_map_ != nullptr)
- xbt_free(non_uniform_map_);
- if (leaders_map_ != nullptr)
- delete[] leaders_map_;
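+ // xbt_free() and delete[] are both no-ops on nullptr, so the guards were redundant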
+ xbt_free(non_uniform_map_);
+ delete[] leaders_map_;
}
void Comm::unref(Comm* comm){
smpi_process()->set_replaying(false);
}
- if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
//identify neighbours in comm
- //get the indexes of all processes sharing the same simix host
- const auto& process_list = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
+ //get the indices of all processes sharing the same simix host
+ auto& process_list = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
- for (auto const& actor : process_list) {
- int index = actor.pid - 1;
- if (this->group()->rank(index) != MPI_UNDEFINED) {
+ for (auto& actor : process_list) {
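+ // indices are now the raw actor PIDs (the former 'pid - 1' shift is gone)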
+ int index = actor.pid;
+ if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
- // the process is in the comm
if (index < min_index)
min_index = index;
}
XBT_DEBUG("number of processes deployed on my node: %d", intra_comm_size);
MPI_Group group_intra = new Group(intra_comm_size);
int i = 0;
- for (auto const& actor : process_list) {
- int index = actor.pid - 1;
- if(this->group()->rank(index)!=MPI_UNDEFINED){
- group_intra->set_mapping(index, i);
+ for (auto& actor : process_list) {
+ if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.iface(), i);
i++;
}
}
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
- if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
if(leaders_map_==nullptr){
}else{
delete[] leaders_map;
}
- int j=0;
int leader_group_size = 0;
for(i=0; i<comm_size; i++){
int already_done = 0;
- for (j = 0; j < leader_group_size; j++) {
+ for (int j = 0; j < leader_group_size; j++) {
if (leaders_map_[i] == leader_list[j]) {
already_done = 1;
}
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
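+ // leader_list stores PIDs: resolve each one to its actor before mapping it in the group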
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
leader_comm = new Comm(leaders_group, nullptr);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr);
}
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
- if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
// Are the ranks blocked, i.e. allocated contiguously on the SMP nodes?
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->index(0));
+ int prev=this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
- int that = this->group()->rank(comm_intra->group()->index(i));
+ int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {
is_blocked = 0;
break;
}
}
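+/* MPI_Comm_split_type backend. Only MPI_COMM_TYPE_SHARED is supported: it groups
+ * the processes that share a node, i.e. the per-host intra communicator. */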
+MPI_Comm Comm::split_type(int type, int key, MPI_Info info)
+{
+ if (type != MPI_COMM_TYPE_SHARED) {
+ return MPI_COMM_NULL;
+ }
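+ // init_smp() builds the per-host intra communicator if it does not exist yet;
+ // take an extra reference on this comm and on the intra one before handing it out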
+ this->init_smp();
+ this->ref();
+ this->get_intra_comm()->ref();
+ return this->get_intra_comm();
+}
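+/* Usage sketch of the user-level MPI-3 call this implements:
+ *   MPI_Comm node_comm;
+ *   MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &node_comm);
+ */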
+
}
}