#include "simgrid/sg_config.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
+#include "src/simix/SynchroComm.hpp"
+
#include <float.h> /* DBL_MAX */
#include <stdint.h>
char* instance_id;
int replaying; /* is the process replaying a trace */
xbt_bar_t finalization_barrier;
+ int return_value;
} s_smpi_process_data_t;
static smpi_process_data_t *process_data = NULL;
extern double smpi_total_benched_time;
xbt_os_timer_t global_timer;
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
-
MPI_Errhandler *MPI_ERRORS_RETURN = NULL;
MPI_Errhandler *MPI_ERRORS_ARE_FATAL = NULL;
MPI_Errhandler *MPI_ERRHANDLER_NULL = NULL;
if (argc && argv) {
proc = SIMIX_process_self();
//FIXME: dirty cleanup method to avoid using msg cleanup functions on these processes when using MSG+SMPI
- SIMIX_process_set_cleanup_function(proc, SIMIX_process_cleanup);
+ SIMIX_process_set_cleanup_function(proc, MSG_process_cleanup_from_SIMIX);
char* instance_id = (*argv)[1];
int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
index = smpi_process_index_of_smx_process(proc);
message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
}
-void smpi_comm_copy_buffer_callback(smx_synchro_t comm, void *buff, size_t buff_size)
+void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t buff_size)
{
XBT_DEBUG("Copy the data over");
void* tmpbuff=buff;
+ simgrid::simix::Comm *comm = dynamic_cast<simgrid::simix::Comm*>(synchro);
if((smpi_privatize_global_variables) && ((char*)buff >= smpi_start_data_exe)
&& ((char*)buff < smpi_start_data_exe + smpi_size_data_exe )
){
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
- smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.src_proc))->data))->index);
+
+
+ smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->src_proc))->data))->index);
tmpbuff = (void*)xbt_malloc(buff_size);
memcpy(tmpbuff, buff, buff_size);
}
- if((smpi_privatize_global_variables) && ((char*)comm->comm.dst_buff >= smpi_start_data_exe)
- && ((char*)comm->comm.dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
+ if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
+ && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
- smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.dst_proc))->data))->index);
+ smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->dst_proc))->data))->index);
}
- memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
- if (comm->comm.detached) {
+ memcpy(comm->dst_buff, tmpbuff, buff_size);
+ if (comm->detached) {
// if this is a detached send, the source buffer was duplicated by SMPI
// sender to make the original buffer available to the application ASAP
xbt_free(buff);
//It seems that the request is used after the call there; it should be freed somewhere else, but where???
//xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
- comm->comm.src_buff = NULL;
+ comm->src_buff = NULL;
}
if(tmpbuff!=buff)xbt_free(tmpbuff);
process_data[i]->state = SMPI_UNINITIALIZED;
process_data[i]->sampling = 0;
process_data[i]->finalization_barrier = NULL;
+ process_data[i]->return_value = 0;
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
smpi_comm_cleanup_smp(MPI_COMM_WORLD);
smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
+ if(smpi_coll_cleanup_callback!=NULL)
+ smpi_coll_cleanup_callback();
xbt_free(MPI_COMM_WORLD);
}
MPI_COMM_WORLD = MPI_COMM_NULL;
+ if (!MC_is_active()) {
+ xbt_os_timer_free(global_timer);
+ }
+
xbt_free(index_to_process_data);
if(smpi_privatize_global_variables)
smpi_destroy_global_memory_segments();
}
#ifndef WIN32
+
/* Weak fallback for the Fortran entry point: a real Fortran application
 * provides its own user_main_, which overrides this one at link time.
 * Reaching this body means the simulated main was invoked without a user
 * program, so abort via xbt_die.
 * Fix: the original body ended with `return 0;`, which is invalid in a
 * function declared void (ill-formed C++ / constraint violation in C);
 * xbt_die does not return, so no return statement is needed at all. */
void __attribute__ ((weak)) user_main_()
{
  xbt_die("Should not be in this smpi_simulated_main");
}
+inline static int smpi_main_wrapper(int argc, char **argv){ /* run the user's MPI main and record its exit status per process */
+ int ret = smpi_simulated_main_(argc,argv);
+ if(ret !=0){
+ XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
+ smpi_process_data()->return_value=ret; /* stash the status; smpi_main later returns the first non-zero value found across processes */
+ }
+ return 0; /* always report success to SIMIX; the real status lives in return_value */
+}
+
int __attribute__ ((weak)) main(int argc, char **argv) /* weak default entry point — presumably overridden when the application links its own main; TODO confirm */
{
- return smpi_main(smpi_simulated_main_, argc, argv);
+ return smpi_main(smpi_main_wrapper, argc, argv); /* go through the wrapper so a non-zero user return code is captured */
}
#endif
int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
mpi_coll_barrier_fun = (int (*)(MPI_Comm comm)) mpi_coll_barrier_description[barrier_id].coll;
+ smpi_coll_cleanup_callback=NULL;
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
smpi_running_power = xbt_cfg_get_double("smpi/running-power");
smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
"You may want to use sampling functions or trace replay to reduce this.");
}
}
+ int count = smpi_process_count();
+ int i, ret=0;
+ for (i = 0; i < count; i++) {
+ if(process_data[i]->return_value!=0){
+ ret=process_data[i]->return_value;//return first non 0 value
+ break;
+ }
+ }
smpi_global_destroy();
TRACE_end();
- return 0;
+ return ret;
}
// This function can be called from extern file, to initialize logs, options, and processes of smpi