-/* Copyright (c) 2010-2018. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2019. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_comm.hpp"
-#include "private.hpp"
-#include "simgrid/s4u/Host.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
-#include "smpi_process.hpp"
#include "smpi_request.hpp"
-#include "smpi_status.hpp"
#include "smpi_win.hpp"
-#include "src/simix/smx_host_private.hpp"
-#include "src/simix/smx_private.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
+#include "src/surf/HostImpl.hpp"
#include <algorithm> // std::fill_n is still used below
#include <climits>
#include <vector> // std::vector is still used below
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
-using simgrid::s4u::ActorPtr;
-
-/* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to
- * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by an union. */
+/* Support for cartesian topology was added, but there are two other topology types: graph and dist graph. In order
+ * to support them, we have to add a field SMPI_Topo_type and replace the MPI_Topology field with a union. */
std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;
-Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
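+/* The new 'smp' constructor parameter (presumably defaulted to 0 in the
+ * header) flags communicators built by init_smp(), so that SMP-aware
+ * collectives can recognize them and avoid recursing on them. */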
+Comm::Comm(MPI_Group group, MPI_Topology topo, int smp) : group_(group), topo_(topo), is_smp_comm_(smp)
{
refcount_ = 1;
topoType_ = MPI_INVALID_TOPO;
}
int Comm::dup(MPI_Comm* newcomm){
- if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
int ret = MPI_SUCCESS;
if (not attributes()->empty()) {
- int flag;
- void* value_out;
+ int flag=0;
+ void* value_out=nullptr;
for (auto const& it : *attributes()) {
smpi_key_elem elem = keyvals_.at(it.first);
- if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
- ret = elem->copy_fn.comm_copy_fn(this, it.first, nullptr, it.second, &value_out, &flag);
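+      // Dispatch on the copy callback attached to this keyval: a real C
+      // callback is invoked directly; a Fortran callback is recognized via the
+      // sentinel checks below; MPI_NULL_COPY_FN skips the user callback, and
+      // MPI_COMM_DUP_FN is handled after the error check.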
+ if (elem != nullptr){
+ if( elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN &&
+ elem->copy_fn.comm_copy_fn != MPI_COMM_DUP_FN)
+ ret = elem->copy_fn.comm_copy_fn(this, it.first, elem->extra_state, it.second, &value_out, &flag);
+ else if ( elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN &&
+ *(int*)*elem->copy_fn.comm_copy_fn_fort != 1){
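+          // A Fortran callback writes an int-sized attribute value, so
+          // allocate storage for it before the call.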
+ value_out=(int*)xbt_malloc(sizeof(int));
+ elem->copy_fn.comm_copy_fn_fort(this, it.first, elem->extra_state, it.second, value_out, &flag,&ret);
+ }
if (ret != MPI_SUCCESS) {
Comm::destroy(*newcomm);
*newcomm = MPI_COMM_NULL;
return ret;
}
- if (flag){
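+      // MPI_COMM_DUP_FN (or its Fortran counterpart, sentinel value 1) means
+      // "duplicate the attribute as is": share the old value pointer and take
+      // one more reference on the keyval.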
+ if (elem->copy_fn.comm_copy_fn == MPI_COMM_DUP_FN ||
+ ((elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN) && *(int*)*elem->copy_fn.comm_copy_fn_fort == 1)){
+ elem->refcount++;
+ (*newcomm)->attributes()->insert({it.first, it.second});
+ }else if (flag){
elem->refcount++;
(*newcomm)->attributes()->insert({it.first, value_out});
}
return group_;
}
-MPI_Topology Comm::topo() {
- return topo_;
-}
-
int Comm::size()
{
if (this == MPI_COMM_UNINITIALIZED)
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(simgrid::s4u::Actor::self());
+ return group_->rank(s4u::Actor::self());
}
void Comm::get_name (char* name, int* len)
return;
}
if(this == MPI_COMM_WORLD) {
- strncpy(name, "WORLD",5);
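+    // Copy 6 bytes so that the terminating NUL of "WORLD" is included.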
+ strncpy(name, "WORLD", 6);
*len = 5;
} else {
*len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
leaders_comm_=leaders;
}
-void Comm::set_intra_comm(MPI_Comm leaders){
- intra_comm_=leaders;
-}
-
int* Comm::get_non_uniform_map(){
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_non_uniform_map();
return is_blocked_;
}
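+// Whether this communicator was created by init_smp() (intra-node or leader
+// communicator). SMP-aware collectives presumably check this flag to avoid
+// recursing when they build such sub-communicators themselves.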
+int Comm::is_smp_comm(){
+ if (this == MPI_COMM_UNINITIALIZED)
+ return smpi_process()->comm_world()->is_smp_comm();
+ return is_smp_comm_;
+}
+
MPI_Comm Comm::split(int color, int key)
{
if (this == MPI_COMM_UNINITIALIZED)
MPI_Group group_root = nullptr;
MPI_Group group_out = nullptr;
MPI_Group group = this->group();
- int rank = this->rank();
+ int myrank = this->rank();
int size = this->size();
/* Gather all colors and keys on rank 0 */
int* sendbuf = xbt_new(int, 2);
sendbuf[0] = color;
sendbuf[1] = key;
- if(rank == 0) {
+ if (myrank == 0) {
recvbuf = xbt_new(int, 2 * size);
} else {
recvbuf = nullptr;
Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
xbt_free(sendbuf);
/* Do the actual job */
- if(rank == 0) {
+ if (myrank == 0) {
MPI_Group* group_snd = xbt_new(MPI_Group, size);
std::vector<std::pair<int, int>> rankmap;
rankmap.reserve(size);
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- ActorPtr actor = group->actor(rankmap[j].second);
+ s4u::ActorPtr actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
Comm::unref(intra_comm_);
if (leaders_comm_ != MPI_COMM_NULL)
Comm::unref(leaders_comm_);
- if (non_uniform_map_ != nullptr)
- xbt_free(non_uniform_map_);
- if (leaders_map_ != nullptr)
- delete[] leaders_map_;
+ xbt_free(non_uniform_map_);
+ delete[] leaders_map_;
}
void Comm::unref(Comm* comm){
smpi_process()->set_replaying(false);
}
- if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
-  //identify neighbours in comm
-  //get the indices of all processes sharing the same simix host
+  // Identify neighbours in comm:
+  // get the indices of all processes sharing the same host
- auto& process_list = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
+ auto& process_list = sg_host_self()->pimpl_->process_list_;
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
for (auto& actor : process_list) {
- int index = actor.pid;
+ int index = actor.get_pid();
if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
}
}
- MPI_Comm comm_intra = new Comm(group_intra, nullptr);
+ MPI_Comm comm_intra = new Comm(group_intra, nullptr, 1);
leader=min_index;
int* leaders_map = new int[comm_size];
std::fill_n(leaders_map, comm_size, 0);
std::fill_n(leader_list, comm_size, -1);
- Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
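+  // The collectives called from init_smp() now use fixed algorithms (ring
+  // allgather, scatter-LR-allgather bcast) instead of the tuned selector,
+  // presumably so that an SMP-aware selection can never re-enter init_smp().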
+  Coll_allgather_ring::allgather(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);
- if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
if(leaders_map_==nullptr){
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
- leader_comm = new Comm(leaders_group, nullptr);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
+    leader_comm = new Comm(leaders_group, nullptr, 1);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
- leader_comm = new Comm(leaders_group, nullptr);
+      leader_comm = new Comm(leaders_group, nullptr, 1);
this->set_leaders_comm(leader_comm);
}else{
leader_comm=this->get_leaders_comm();
if(comm_intra->rank()==0) {
int is_uniform = 1;
int* non_uniform_map = xbt_new0(int,leader_group_size);
- Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
+ Coll_allgather_ring::allgather(&my_local_size, 1, MPI_INT,
non_uniform_map, 1, MPI_INT, leader_comm);
for(i=0; i < leader_group_size; i++) {
if(non_uniform_map[0] != non_uniform_map[i]) {
}
is_uniform_=is_uniform;
}
- Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
+  Coll_bcast_scatter_LR_allgather::bcast(&is_uniform_, 1, MPI_INT, 0, comm_intra);
- if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
-  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
+  // Are the ranks blocked, i.e. allocated contiguously on the SMP nodes?
int is_blocked=1;
}
}
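+// MPI_Comm_split_type: only MPI_COMM_TYPE_SHARED is supported here; each rank
+// then receives the communicator of its own SMP node.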
+MPI_Comm Comm::split_type(int type, int /*key*/, MPI_Info /*info*/)
+{
+ if(type != MPI_COMM_TYPE_SHARED){
+ return MPI_COMM_NULL;
+ }
+ this->init_smp();
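+  // The caller will eventually free the returned communicator, so take an
+  // extra reference on both this communicator and its intra-communicator,
+  // presumably to keep them alive until that matching unref.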
+ this->ref();
+ this->get_intra_comm()->ref();
+ return this->get_intra_comm();
}
-}
-
+} // namespace smpi
+} // namespace simgrid