#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "xbt/replay.hpp"
#include <xbt/config.hpp>
#include <float.h> /* DBL_MAX */
#include <fstream>
#endif
std::unordered_map<std::string, double> location2speedup;
-Process **process_data = nullptr;
+simgrid::smpi::Process **process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+static simgrid::config::Flag<double> smpi_wtime_sleep(
+ "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
+static simgrid::config::Flag<double> smpi_init_sleep(
+ "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
return process_count;
}
-Process* smpi_process()
+simgrid::smpi::Process* smpi_process()
{
simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
- return static_cast<Process*>(msgExt->data);
+ return static_cast<simgrid::smpi::Process*>(msgExt->data);
}
-Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(int index)
{
return process_data[index_to_process_data[index]];
}
}
/** C-linkage entry point: initialize the calling SMPI process.
 *  Thin wrapper delegating to simgrid::smpi::Process::init. */
void smpi_process_init(int *argc, char ***argv){
  simgrid::smpi::Process::init(argc, argv);
}
int smpi_process_index(){
/** Copy the payload of a SIMIX communication from sender to receiver buffer.
 *
 *  Handles two SMPI-specific subtleties:
 *   - SMPI "shared" buffers (smpi_is_shared) carry no meaningful data, so the
 *     copy is skipped entirely when either end is shared;
 *   - with global-variable privatization, a buffer living in the executable's
 *     data segment must be read/written under the owner's mapping, so we
 *     switch data segments around the copy (staging the source into a temp
 *     buffer first when it lives in the privatized zone).
 */
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);

  XBT_DEBUG("Copy the data over");
  if(smpi_is_shared(buff)){
    XBT_DEBUG("Sender %p is shared. Let's ignore it.", buff);
  }else if(smpi_is_shared((char*)comm->dst_buff)){
    XBT_DEBUG("Receiver %p is shared. Let's ignore it.", (char*)comm->dst_buff);
  }else{
    void* tmpbuff=buff;
    // Source lies inside the privatized global data segment: switch to the
    // sender's mapping and stage the bytes into a heap buffer.
    if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
        && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
      ){
      XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");

      smpi_switch_data_segment(
          (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
      tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
      memcpy(tmpbuff, buff, buff_size);
    }

    // Destination lies inside the privatized zone: switch to the receiver's
    // mapping before writing.
    if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
        && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
      XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
      smpi_switch_data_segment(
          (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
    }

    XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
    memcpy(comm->dst_buff, tmpbuff, buff_size);

    if (comm->detached) {
      // if this is a detached send, the source buffer was duplicated by SMPI
      // sender to make the original buffer available to the application ASAP
      xbt_free(buff);
      //It seems that the request is used after the call there this should be free somewhere else but where???
      //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
      comm->src_buff = nullptr;
    }
    // Release the staging buffer if one was allocated above.
    if(tmpbuff!=buff)xbt_free(tmpbuff);
  }
}
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
smpirun=1;
}
smpi_universe_size = process_count;
- process_data = new Process*[process_count];
+ process_data = new simgrid::smpi::Process*[process_count];
for (i = 0; i < process_count; i++) {
- process_data[i] = new Process(i);
+ process_data[i] = new simgrid::smpi::Process(i);
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
if(smpirun){
- group = new Group(process_count);
- MPI_COMM_WORLD = new Comm(group, nullptr);
+ group = new simgrid::smpi::Group(process_count);
+ MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
msg_bar_t bar = MSG_barrier_init(process_count);
int count = smpi_process_count();
smpi_bench_destroy();
+ smpi_shared_destroy();
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
delete MPI_COMM_WORLD->group();
MSG_barrier_destroy(process_data[0]->finalization_barrier());
}
for (int i = 0; i < count; i++) {
if(process_data[i]->comm_self()!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_self());
+ simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
}
if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_intra());
+ simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
}
xbt_os_timer_free(process_data[i]->timer());
xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
MPI_COMM_WORLD->cleanup_smp();
- MPI_COMM_WORLD->cleanup_attr<Comm>();
- if(Colls::smpi_coll_cleanup_callback!=nullptr)
- Colls::smpi_coll_cleanup_callback();
+ MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback();
delete MPI_COMM_WORLD;
}
/** Weak default simulated main: initializes the SMPI process then calls the
 *  Fortran-style user entry point. Overridden when the application provides
 *  its own smpi_simulated_main_. Always returns 0. */
int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
{
  simgrid::smpi::Process::init(&argc, &argv);
  user_main_();
  return 0;
}
XBT_LOG_CONNECT(smpi); /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
XBT_LOG_CONNECT(instr_smpi);
- XBT_LOG_CONNECT(smpi_base);
XBT_LOG_CONNECT(smpi_bench);
XBT_LOG_CONNECT(smpi_coll);
XBT_LOG_CONNECT(smpi_colls);
XBT_LOG_CONNECT(smpi_request);
XBT_LOG_CONNECT(smpi_replay);
XBT_LOG_CONNECT(smpi_rma);
+ XBT_LOG_CONNECT(smpi_shared);
XBT_LOG_CONNECT(smpi_utils);
}
}
static void smpi_init_options(){
- Colls::set_collectives();
- Colls::smpi_coll_cleanup_callback=nullptr;
+ simgrid::smpi::Colls::set_collectives();
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
/** Public teardown entry point: destroy all global SMPI state. */
void SMPI_finalize(){
  smpi_global_destroy();
}
+
+void smpi_mpi_init() {
+ if(smpi_init_sleep > 0)
+ simcall_process_sleep(smpi_init_sleep);
+}
+
+double smpi_mpi_wtime(){
+ double time;
+ if (smpi_process()->initialized() != 0 && smpi_process()->finalized() == 0 && smpi_process()->sampling() == 0) {
+ smpi_bench_end();
+ time = SIMIX_get_clock();
+ // to avoid deadlocks if used as a break condition, such as
+ // while (MPI_Wtime(...) < time_limit) {
+ // ....
+ // }
+ // because the time will not normally advance when only calls to MPI_Wtime
+ // are made -> deadlock (MPI_Wtime never reaches the time limit)
+ if(smpi_wtime_sleep > 0)
+ simcall_process_sleep(smpi_wtime_sleep);
+ smpi_bench_begin();
+ } else {
+ time = SIMIX_get_clock();
+ }
+ return time;
+}
+