Replace smpi_process()->index() with simgrid::s4u::Actor::self()->getPid() throughout SMPI, done via:
sed -i -r -e 's@smpi_process\(\)\->index\(\)@simgrid::s4u::Actor::self\(\)\->getPid\(\)@' src/smpi/**/*.cpp
if(already_init == 0){
simgrid::smpi::Process::init(argc, argv);
smpi_process()->mark_as_initialized();
if(already_init == 0){
simgrid::smpi::Process::init(argc, argv);
smpi_process()->mark_as_initialized();
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_init(rank);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("init"));
TRACE_smpi_comm_out(rank);
TRACE_smpi_init(rank);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("init"));
TRACE_smpi_comm_out(rank);
int PMPI_Finalize()
{
smpi_bench_end();
int PMPI_Finalize()
{
smpi_bench_end();
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
if (flag == nullptr) {
return MPI_ERR_ARG;
} else {
if (flag == nullptr) {
return MPI_ERR_ARG;
} else {
- *flag = smpi_process()->index() == 1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
+ *flag = simgrid::s4u::Actor::self()->getPid() == 1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
// regardless of whether this process called MPI_Thread_Init() or not.
return MPI_SUCCESS;
}
// regardless of whether this process called MPI_Thread_Init() or not.
return MPI_SUCCESS;
}
return MPI_ERR_GROUP;
} else if (newcomm == nullptr) {
return MPI_ERR_ARG;
return MPI_ERR_GROUP;
} else if (newcomm == nullptr) {
return MPI_ERR_ARG;
- } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
+ } else if(group->rank(simgrid::s4u::Actor::self()->getPid())==MPI_UNDEFINED){
*newcomm= MPI_COMM_NULL;
return MPI_SUCCESS;
}else{
*newcomm= MPI_COMM_NULL;
return MPI_SUCCESS;
}else{
} else if (rank == nullptr) {
return MPI_ERR_ARG;
} else {
} else if (rank == nullptr) {
return MPI_ERR_ARG;
} else {
- *rank = group->rank(smpi_process()->index());
+ *rank = group->rank(simgrid::s4u::Actor::self()->getPid());
simgrid::smpi::Status::empty(status);
retval = MPI_SUCCESS;
} else {
simgrid::smpi::Status::empty(status);
retval = MPI_SUCCESS;
} else {
- int my_proc_id = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
+ int my_proc_id = ((*request)->comm() != MPI_COMM_NULL) ? simgrid::s4u::Actor::self()->getPid() : -1;
TRACE_smpi_testing_in(my_proc_id);
TRACE_smpi_testing_in(my_proc_id);
- int rank_traced = smpi_process()->index(); // FIXME: In PMPI_Wait, we check if the comm is null?
+ int rank_traced = simgrid::s4u::Actor::self()->getPid(); // FIXME: In PMPI_Wait, we check if the comm is null?
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAny", static_cast<double>(count)));
*index = simgrid::smpi::Request::waitany(count, requests, status);
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAny", static_cast<double>(count)));
*index = simgrid::smpi::Request::waitany(count, requests, status);
- int rank_traced = smpi_process()->index(); // FIXME: In PMPI_Wait, we check if the comm is null?
+ int rank_traced = simgrid::s4u::Actor::self()->getPid(); // FIXME: In PMPI_Wait, we check if the comm is null?
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAll", static_cast<double>(count)));
int retval = simgrid::smpi::Request::waitall(count, requests, status);
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAll", static_cast<double>(count)));
int retval = simgrid::smpi::Request::waitall(count, requests, status);
if (TRACE_is_enabled()) { \
simgrid::instr::EventType* type = simgrid::instr::Container::getRoot()->type_->getOrCreateEventType(#cat); \
\
if (TRACE_is_enabled()) { \
simgrid::instr::EventType* type = simgrid::instr::Container::getRoot()->type_->getOrCreateEventType(#cat); \
\
- std::string cont_name = std::string("rank-" + std::to_string(smpi_process()->index())); \
+ std::string cont_name = std::string("rank-" + std::to_string(simgrid::s4u::Actor::self()->getPid())); \
type->addEntityValue(Colls::mpi_coll_##cat##_description[i].name, "1.0 1.0 1.0"); \
new simgrid::instr::NewEvent(SIMIX_get_clock(), simgrid::instr::Container::byName(cont_name), type, \
type->getEntityValue(Colls::mpi_coll_##cat##_description[i].name)); \
type->addEntityValue(Colls::mpi_coll_##cat##_description[i].name, "1.0 1.0 1.0"); \
new simgrid::instr::NewEvent(SIMIX_get_clock(), simgrid::instr::Container::byName(cont_name), type, \
type->getEntityValue(Colls::mpi_coll_##cat##_description[i].name)); \
smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->process()->getImpl()->host);
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->process()->getImpl()->host);
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
void smpi_execute(double duration)
}
void smpi_execute(double duration)
if (duration >= smpi_cpu_threshold) {
XBT_DEBUG("Sleep for %g to handle real computation time", duration);
double flops = duration * smpi_host_speed;
if (duration >= smpi_cpu_threshold) {
XBT_DEBUG("Sleep for %g to handle real computation time", duration);
double flops = duration * smpi_host_speed;
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_computing_in(rank, flops);
smpi_execute_flops(flops);
TRACE_smpi_computing_in(rank, flops);
smpi_execute_flops(flops);
void smpi_bench_begin()
{
if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
void smpi_bench_begin()
{
if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if (MC_is_active() || MC_record_replay_is_active())
}
if (MC_is_active() || MC_record_replay_is_active())
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
container_t container =
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
container_t container =
- new simgrid::instr::Container(std::string("rank-") + std::to_string(smpi_process()->index()));
+ new simgrid::instr::Container(std::string("rank-") + std::to_string(simgrid::s4u::Actor::self()->getPid()));
papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
SampleLocation(bool global, const char* file, int line) : std::string(std::string(file) + ":" + std::to_string(line))
{
if (not global)
SampleLocation(bool global, const char* file, int line) : std::string(std::string(file) + ":" + std::to_string(line))
{
if (not global)
- this->append(":" + std::to_string(smpi_process()->index()));
+ this->append(":" + std::to_string(simgrid::s4u::Actor::self()->getPid()));
}
int smpi_process_index(){
}
int smpi_process_index(){
- return smpi_process()->index();
+ return simgrid::s4u::Actor::self()->getPid();
}
void * smpi_process_get_user_data(){
}
void * smpi_process_get_user_data(){
int Comm::dup(MPI_Comm* newcomm){
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
int Comm::dup(MPI_Comm* newcomm){
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
}
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
}
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
//identify neighbours in comm
//get the indices of all processes sharing the same simix host
}
//identify neighbours in comm
//get the indices of all processes sharing the same simix host
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if(leaders_map_==nullptr){
}
if(leaders_map_==nullptr){
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
// FIXME Handle the case of a partial shared malloc.
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
// FIXME Handle the case of a partial shared malloc.
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
/* First check if we really have something to do */
if (recvcount > 0 && recvbuf != sendbuf) {
}
/* First check if we really have something to do */
if (recvcount > 0 && recvbuf != sendbuf) {
}
char* F2C::get_key_id(char* key, int id) {
}
char* F2C::get_key_id(char* key, int id) {
- std::snprintf(key, KEY_SIZE, "%x_%d", static_cast<unsigned>(id), smpi_process()->index());
+ std::snprintf(key, KEY_SIZE, "%x_%lu", static_cast<unsigned>(id), simgrid::s4u::Actor::self()->getPid());
{
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){//we need to switch as the called function may silently touch global variables
XBT_DEBUG("Applying operation, switch to the right data frame ");
{
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){//we need to switch as the called function may silently touch global variables
XBT_DEBUG("Applying operation, switch to the right data frame ");
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if (not smpi_process()->replaying() && *len > 0) {
}
if (not smpi_process()->replaying() && *len > 0) {