#ifdef SMPI_F2C
/* Mirror the rank into the F2C bridge so the Fortran bindings see it. */
smpi_current_rank = index;
#endif
+
/* Attach this rank's SMPI bookkeeping data to its SIMIX process. */
data = smpi_process_remote_data(index);
simcall_process_set_data(proc, data);
if (*argc > 2) {
// set the process attached to the mailbox
simcall_rdv_set_receiver(data->mailbox_small, proc);
XBT_DEBUG("<%d> New process in the game: %p", index, proc);
+
+ /* With privatized globals, switch to this rank's private copy of the
+  * executable's data segment before the process starts running. */
+  if(smpi_privatize_global_variables){
+    switch_data_segment(index);
+  }
+
}
if (smpi_process_data() == NULL)
xbt_die("smpi_process_data() returned NULL. You probably gave a NULL parameter to MPI_Init. Although it's required by MPI-2, this is currently not supported by SMPI.");
void smpi_process_destroy(void)
{
int index = smpi_process_index();
+ /* Re-map this rank's private data segment: the scheduler may have left
+  * another rank's segment active, and process_data lives in globals.
+  * NOTE(review): presumably required only when privatization is on — confirm. */
+  if(smpi_privatize_global_variables){
+    switch_data_segment(index);
+  }
process_data[index]->state = SMPI_FINALIZED;
XBT_DEBUG("<%d> Process left the game", index);
}
smpi_process_data_t data = smpi_process_data();
/* Lazily build MPI_COMM_SELF: a one-member group mapping this rank to 0. */
if(data->comm_self==MPI_COMM_NULL){
MPI_Group group = smpi_group_new(1);
-    data->comm_self = smpi_comm_new(group);
+ /* smpi_comm_new() gained a second argument in this patch; NULL here.
+  * NOTE(review): presumably a topology handle — confirm against its declaration. */
+    data->comm_self = smpi_comm_new(group, NULL);
smpi_group_set_mapping(group, smpi_process_index(), 0);
}
{
XBT_DEBUG("Copy the data over");
/* Nothing to copy while replaying a trace: payloads are not simulated. */
if(_xbt_replay_is_active()) return;
+ /* By default copy straight from the sender's buffer; a temporary copy is
+  * only needed when the source lies in a privatized global data segment. */
+  void* tmpbuff=buff;
+
+ /* Source buffer inside the executable's global data range: switch to the
+  * sender's private segment and snapshot the data before it becomes
+  * unreachable when the destination's segment is mapped in. */
+  if((smpi_privatize_global_variables)
+      && ((char*)buff >= start_data_exe)
+      && ((char*)buff < start_data_exe + size_data_exe )
+    ){
+       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
+       switch_data_segment(((smpi_process_data_t)SIMIX_process_get_data(comm->comm.src_proc))->index);
+       tmpbuff = (void*)xbt_malloc(buff_size);
+       memcpy(tmpbuff, buff, buff_size);
+  }
+
+
+ /* Destination buffer inside global data: map the receiver's private
+  * segment so the memcpy below lands in the right rank's copy. */
+  if((smpi_privatize_global_variables)
+      && ((char*)comm->comm.dst_buff >= start_data_exe)
+      && ((char*)comm->comm.dst_buff < start_data_exe + size_data_exe )
+    ){
+       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
+       switch_data_segment(((smpi_process_data_t)SIMIX_process_get_data(comm->comm.dst_proc))->index);
+  }
+
+
+  memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
if (comm->comm.detached) {
// if this is a detached send, the source buffer was duplicated by SMPI
// sender to make the original buffer available to the application ASAP
//It seems that the request is used after the call there this should
//be free somewhereelse but where???
//xbt_free(comm->comm.src_data);// inside SMPI the request is keep
-//inside the user data and should be free
+//inside the user data and should be free
comm->comm.src_buff = NULL;
}
+
+ /* Release the snapshot only when one was actually allocated above. */
+ if(tmpbuff!=buff)xbt_free(tmpbuff);
+
+}
+
+/* Sanity-check user-supplied SMPI configuration after it has been read. */
+static void smpi_check_options(){
+  //check correctness of MPI parameters
+
+ /* A message small enough for the eager/async path must also qualify as a
+  * detached send, so the thresholds must be ordered. */
+   xbt_assert(sg_cfg_get_int("smpi/async_small_thres") <=
+              sg_cfg_get_int("smpi/send_is_detached_thres"));
+
+ /* Warn loudly if the reference host power was left at its default:
+  * every measured timing would then be scaled by a meaningless value. */
+   if (sg_cfg_is_default_value("smpi/running_power")) {
+     XBT_INFO("You did not set the power of the host running the simulation. "
+              "The timings will certainly not be accurate. "
+              "Use the option \"--cfg=smpi/running_power:<flops>\" to set its value."
+              "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information. ");
+   }
}
void smpi_global_init(void)
process_data[i]->sampling = 0;
}
/* MPI_COMM_WORLD spans all simulated processes, identity-mapped rank i -> i. */
group = smpi_group_new(process_count);
-  MPI_COMM_WORLD = smpi_comm_new(group);
+  MPI_COMM_WORLD = smpi_comm_new(group, NULL);
MPI_UNIVERSE_SIZE = smpi_comm_size(MPI_COMM_WORLD);
for (i = 0; i < process_count; i++) {
smpi_group_set_mapping(group, i, i);
"Use the option \"--cfg=smpi/running_power:<flops>\" to set its value."
"Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information. ");
}
+ /* NOTE(review): smpi_main in this same patch also calls
+  * smpi_initialize_global_memory_segments() when privatization is enabled —
+  * looks like a duplicate initialization; confirm the call is idempotent. */
+  if(smpi_privatize_global_variables)
+    smpi_initialize_global_memory_segments();
}
void smpi_global_destroy(void)
}
xbt_free(process_data);
process_data = NULL;
-
+ /* Tear down the per-rank private copies of the data segment, mirroring
+  * the initialization performed when privatization is enabled. */
+  if(smpi_privatize_global_variables)
+    smpi_destroy_global_memory_segments();
smpi_free_static();
}
};
#endif
-int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
-{
-  srand(SMPI_RAND_SEED);
-
-  if (getenv("SMPI_PRETEND_CC") != NULL) {
-    /* Hack to ensure that smpicc can pretend to be a simple
-     * compiler. Particularly handy to pass it to the configuration tools */
-    return 0;
-  }
+/* Connect the SMPI log categories to the XBT logging infrastructure.
+ * Split out of smpi_main so it can run before any other initialization. */
+static void smpi_init_logs(){
/* Connect log categories. See xbt/log.c */
XBT_LOG_CONNECT(smpi); /* Keep this line as soon as possible in this
XBT_LOG_CONNECT(smpi_pmpi);
XBT_LOG_CONNECT(smpi_replay);
+}
+
+
+/* Resolve each collective implementation selected via the "smpi/<coll>"
+ * configuration strings into the corresponding function pointer, and cache
+ * the scalar SMPI tuning options (cpu threshold, reference host power,
+ * global-variable privatization).  Extracted from smpi_main by this patch. */
+static void smpi_init_options(){
+        int gather_id = find_coll_description(mpi_coll_gather_description,
+                                              sg_cfg_get_string("smpi/gather"));
+        mpi_coll_gather_fun = (int (*)(void *, int, MPI_Datatype,
+                                       void *, int, MPI_Datatype, int, MPI_Comm))
+            mpi_coll_gather_description[gather_id].coll;
+
+        int allgather_id = find_coll_description(mpi_coll_allgather_description,
+                                                 sg_cfg_get_string("smpi/allgather"));
+        mpi_coll_allgather_fun = (int (*)(void *, int, MPI_Datatype,
+                                          void *, int, MPI_Datatype, MPI_Comm))
+            mpi_coll_allgather_description[allgather_id].coll;
+
+        int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
+                                                  sg_cfg_get_string("smpi/allgatherv"));
+        mpi_coll_allgatherv_fun = (int (*)(void *, int, MPI_Datatype, void *, int *,
+                                           int *, MPI_Datatype, MPI_Comm))
+            mpi_coll_allgatherv_description[allgatherv_id].coll;
+
+        int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
+                                                 sg_cfg_get_string("smpi/allreduce"));
+        mpi_coll_allreduce_fun = (int (*)(void *sbuf, void *rbuf, int rcount,
+                                          MPI_Datatype dtype, MPI_Op op,
+                                          MPI_Comm comm))
+            mpi_coll_allreduce_description[allreduce_id].coll;
+
+        int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
+                                                sg_cfg_get_string("smpi/alltoall"));
+        mpi_coll_alltoall_fun = (int (*)(void *, int, MPI_Datatype,
+                                         void *, int, MPI_Datatype, MPI_Comm))
+            mpi_coll_alltoall_description[alltoall_id].coll;
+
+        int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
+                                                 sg_cfg_get_string("smpi/alltoallv"));
+        mpi_coll_alltoallv_fun = (int (*)(void *, int *, int *, MPI_Datatype,
+                                          void *, int *, int *, MPI_Datatype,
+                                          MPI_Comm))
+            mpi_coll_alltoallv_description[alltoallv_id].coll;
+
+        int bcast_id = find_coll_description(mpi_coll_bcast_description,
+                                             sg_cfg_get_string("smpi/bcast"));
+        mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype,
+                                      int root, MPI_Comm com))
+            mpi_coll_bcast_description[bcast_id].coll;
+
+        int reduce_id = find_coll_description(mpi_coll_reduce_description,
+                                              sg_cfg_get_string("smpi/reduce"));
+        mpi_coll_reduce_fun = (int (*)(void *buf, void *rbuf, int count,
+                                       MPI_Datatype datatype, MPI_Op op,
+                                       int root, MPI_Comm comm))
+            mpi_coll_reduce_description[reduce_id].coll;
+
+        int reduce_scatter_id =
+            find_coll_description(mpi_coll_reduce_scatter_description,
+                                  sg_cfg_get_string("smpi/reduce_scatter"));
+        mpi_coll_reduce_scatter_fun = (int (*)(void *sbuf, void *rbuf, int *rcounts,
+                                               MPI_Datatype dtype, MPI_Op op,
+                                               MPI_Comm comm))
+            mpi_coll_reduce_scatter_description[reduce_scatter_id].coll;
+
+        int scatter_id = find_coll_description(mpi_coll_scatter_description,
+                                               sg_cfg_get_string("smpi/scatter"));
+        mpi_coll_scatter_fun = (int (*)(void *sendbuf, int sendcount,
+                                        MPI_Datatype sendtype, void *recvbuf,
+                                        int recvcount, MPI_Datatype recvtype,
+                                        int root, MPI_Comm comm))
+            mpi_coll_scatter_description[scatter_id].coll;
+
+        int barrier_id = find_coll_description(mpi_coll_barrier_description,
+                                               sg_cfg_get_string("smpi/barrier"));
+        mpi_coll_barrier_fun = (int (*)(MPI_Comm comm))
+            mpi_coll_barrier_description[barrier_id].coll;
+
+        smpi_cpu_threshold = sg_cfg_get_double("smpi/cpu_threshold");
+        smpi_running_power = sg_cfg_get_double("smpi/running_power");
+        smpi_privatize_global_variables = sg_cfg_get_boolean("smpi/privatize_global_variables");
+ /* A negative threshold disables the "too cheap to bother simulating" cutoff. */
+        if (smpi_cpu_threshold < 0)
+            smpi_cpu_threshold = DBL_MAX;
+
+}
+
+/* Entry point of an SMPI simulation: initializes logging, tracing, SIMIX,
+ * the platform (argv[1]) and deployment (argv[2]), then the SMPI runtime,
+ * before handing control to the simulated application's main(). */
+int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
+{
+  srand(SMPI_RAND_SEED);
+
+  if (getenv("SMPI_PRETEND_CC") != NULL) {
+    /* Hack to ensure that smpicc can pretend to be a simple
+     * compiler. Particularly handy to pass it to the configuration tools */
+    return 0;
+  }
+
+  smpi_init_logs();
+
#ifdef HAVE_TRACING
TRACE_global_init(&argc, argv);
SIMIX_global_init(&argc, argv);
-#ifdef HAVE_TRACING
-  TRACE_start();
-#endif
-
// parse the platform file: get the host list
SIMIX_create_environment(argv[1]);
-
+ /* Install the privatization-aware copy callback before any communication. */
+  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
SIMIX_function_register_default(realmain);
SIMIX_launch_application(argv[2]);
- int gather_id = find_coll_description(mpi_coll_gather_description,
- sg_cfg_get_string("smpi/gather"));
- mpi_coll_gather_fun = (int (*)(void *, int, MPI_Datatype,
- void *, int, MPI_Datatype, int, MPI_Comm))
- mpi_coll_gather_description[gather_id].coll;
-
- int allgather_id = find_coll_description(mpi_coll_allgather_description,
- sg_cfg_get_string("smpi/allgather"));
- mpi_coll_allgather_fun = (int (*)(void *, int, MPI_Datatype,
- void *, int, MPI_Datatype, MPI_Comm))
- mpi_coll_allgather_description[allgather_id].coll;
-
- int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
- sg_cfg_get_string("smpi/allgatherv"));
- mpi_coll_allgatherv_fun = (int (*)(void *, int, MPI_Datatype, void *, int *,
- int *, MPI_Datatype, MPI_Comm))
- mpi_coll_allgatherv_description[allgatherv_id].coll;
-
- int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
- sg_cfg_get_string("smpi/allreduce"));
- mpi_coll_allreduce_fun = (int (*)(void *sbuf, void *rbuf, int rcount,
- MPI_Datatype dtype, MPI_Op op,
- MPI_Comm comm))
- mpi_coll_allreduce_description[allreduce_id].coll;
-
- int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
- sg_cfg_get_string("smpi/alltoall"));
- mpi_coll_alltoall_fun = (int (*)(void *, int, MPI_Datatype,
- void *, int, MPI_Datatype, MPI_Comm))
- mpi_coll_alltoall_description[alltoall_id].coll;
-
- int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
- sg_cfg_get_string("smpi/alltoallv"));
- mpi_coll_alltoallv_fun = (int (*)(void *, int *, int *, MPI_Datatype,
- void *, int *, int *, MPI_Datatype,
- MPI_Comm))
- mpi_coll_alltoallv_description[alltoallv_id].coll;
-
- int bcast_id = find_coll_description(mpi_coll_bcast_description,
- sg_cfg_get_string("smpi/bcast"));
- mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype,
- int root, MPI_Comm com))
- mpi_coll_bcast_description[bcast_id].coll;
-
- int reduce_id = find_coll_description(mpi_coll_reduce_description,
- sg_cfg_get_string("smpi/reduce"));
- mpi_coll_reduce_fun = (int (*)(void *buf, void *rbuf, int count,
- MPI_Datatype datatype, MPI_Op op,
- int root, MPI_Comm comm))
- mpi_coll_reduce_description[reduce_id].coll;
-
- int reduce_scatter_id =
- find_coll_description(mpi_coll_reduce_scatter_description,
- sg_cfg_get_string("smpi/reduce_scatter"));
- mpi_coll_reduce_scatter_fun = (int (*)(void *sbuf, void *rbuf, int *rcounts,
- MPI_Datatype dtype, MPI_Op op,
- MPI_Comm comm))
- mpi_coll_reduce_scatter_description[reduce_scatter_id].coll;
-
- int scatter_id = find_coll_description(mpi_coll_scatter_description,
- sg_cfg_get_string("smpi/scatter"));
- mpi_coll_scatter_fun = (int (*)(void *sendbuf, int sendcount,
- MPI_Datatype sendtype, void *recvbuf,
- int recvcount, MPI_Datatype recvtype,
- int root, MPI_Comm comm))
- mpi_coll_scatter_description[scatter_id].coll;
-
- int barrier_id = find_coll_description(mpi_coll_barrier_description,
- sg_cfg_get_string("smpi/barrier"));
- mpi_coll_barrier_fun = (int (*)(MPI_Comm comm))
- mpi_coll_barrier_description[barrier_id].coll;
-
- smpi_cpu_threshold = sg_cfg_get_double("smpi/cpu_threshold");
- smpi_running_power = sg_cfg_get_double("smpi/running_power");
- if (smpi_cpu_threshold < 0)
- smpi_cpu_threshold = DBL_MAX;
+ /* Option parsing must precede smpi_global_init() (which may consult the
+  * privatization flag); validation runs once everything is configured. */
+  smpi_init_options();
smpi_global_init();
+  smpi_check_options();
+
+ /* NOTE(review): smpi_global_init() in this same patch already calls
+  * smpi_initialize_global_memory_segments() when privatization is enabled —
+  * this second call looks redundant; confirm the function is idempotent. */
+  if(smpi_privatize_global_variables)
+    smpi_initialize_global_memory_segments();
+
/* Clean IO before the run */
fflush(stdout);
fflush(stderr);