X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/16617733636d3dd60be16310b6bc33de7b38b9f8..da41b147264c6eb7febc5be6cfb2380eab69ba70:/src/smpi/smpi_replay.c

diff --git a/src/smpi/smpi_replay.c b/src/smpi/smpi_replay.c
index dfc0d229e5..5991906ea4 100644
--- a/src/smpi/smpi_replay.c
+++ b/src/smpi/smpi_replay.c
@@ -16,7 +16,8 @@
 int communicator_size = 0;
 static int active_processes = 0;
 xbt_dynar_t *reqq = NULL;
-MPI_Datatype MPI_DEFAULT_TYPE, MPI_CURRENT_TYPE;
+MPI_Datatype MPI_DEFAULT_TYPE;
+MPI_Datatype MPI_CURRENT_TYPE;
 
 static void log_timed_action (const char *const *action, double clock){
   if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
@@ -452,7 +453,7 @@ static void action_reduce(const char *const *action)
   int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
   TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
 #endif
-  smpi_mpi_reduce(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
+  mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
   smpi_execute_flops(comp_size);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
@@ -475,9 +476,9 @@ static void action_allReduce(const char *const *action) {
   TRACE_smpi_computing_out(rank);
   TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
 #endif
-  smpi_mpi_reduce(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
+  mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
   smpi_execute_flops(comp_size);
-  smpi_mpi_bcast(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
+  mpi_coll_bcast_fun(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
   TRACE_smpi_computing_in(rank);
@@ -491,31 +492,82 @@ static void action_allToAll(const char *const *action) {
   int comm_size = smpi_comm_size(MPI_COMM_WORLD);
   int send_size = parse_double(action[2]);
   int recv_size = parse_double(action[3]);
-  void *send = xbt_new0(int, send_size*comm_size);
-  void *recv = xbt_new0(int, send_size*comm_size);
+  MPI_Datatype MPI_CURRENT_TYPE2;
 
-  if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
-  else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+  if(action[4]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[4]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[5]);
+  }
+  else {
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+  }
+  void *send = calloc(send_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recv = calloc(recv_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
 
 #ifdef HAVE_TRACING
   int rank = smpi_process_index();
   TRACE_smpi_computing_out(rank);
   TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
 #endif
+
+  mpi_coll_alltoall_fun(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  TRACE_smpi_computing_in(rank);
+#endif
+
+  log_timed_action (action, clock);
+  xbt_free(send);
+  xbt_free(recv);
+}
+
+
+static void action_gather(const char *const *action) {
+
+
+  /*
+  The structure of the gather action for rank 0 (4 processes in total)
+  is the following:
+  0 gather 68 68 0 0 0
 
-  if (send_size < 200 && comm_size > 12) {
-    smpi_coll_tuned_alltoall_bruck(send, send_size, MPI_CURRENT_TYPE,
-                                   recv, recv_size, MPI_CURRENT_TYPE,
-                                   MPI_COMM_WORLD);
-  } else if (send_size < 3000) {
-    smpi_coll_tuned_alltoall_basic_linear(send, send_size, MPI_CURRENT_TYPE,
-                                          recv, recv_size, MPI_CURRENT_TYPE,
-                                          MPI_COMM_WORLD);
+  where:
+  1) 68 is the send count
+  2) 68 is the recv count
+  3) 0 is the root node
+  4) 0 is the send datatype id, see decode_datatype()
+  5) 0 is the recv datatype id, see decode_datatype()
+
+  */
+  double clock = smpi_process_simulated_elapsed();
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int send_size = parse_double(action[2]);
+  int recv_size = parse_double(action[3]);
+  MPI_Datatype MPI_CURRENT_TYPE2;
+  if(action[5]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[5]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[6]);
   } else {
-    smpi_coll_tuned_alltoall_pairwise(send, send_size, MPI_CURRENT_TYPE,
-                                      recv, recv_size, MPI_CURRENT_TYPE,
-                                      MPI_COMM_WORLD);
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
   }
+  void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recv = calloc(recv_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+  int root=atoi(action[4]);
+  int rank = smpi_process_index();
+
+  if(rank==root)
+    recv = calloc(recv_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_computing_out(rank);
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+#endif
+  smpi_mpi_gather(send, send_size, MPI_CURRENT_TYPE,
+                  recv, recv_size, MPI_CURRENT_TYPE2,
+                  root, MPI_COMM_WORLD);
 
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
@@ -527,6 +579,126 @@ static void action_allToAll(const char *const *action) {
   xbt_free(recv);
 }
 
+
+static void action_reducescatter(const char *const *action) {
+
+  /*
+  The structure of the reducescatter action for rank 0 (4 processes in total)
+  is the following:
+  0 reduceScatter 275427 275427 275427 204020 11346849 0
+
+  where:
+  1) The first four values after the name of the action declare the recvcounts array
+  2) The value 11346849 is the number of instructions to execute
+  3) The last value corresponds to the datatype, see decode_datatype().
+
+  We translate an MPI_Reduce_scatter call into one MPI_Reduce and one MPI_Scatterv.
+
+  */
+
+  double clock = smpi_process_simulated_elapsed();
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int comp_size = parse_double(action[2+comm_size]);
+  int *recvcounts = xbt_new0(int, comm_size);
+  int *disps = xbt_new0(int, comm_size);
+  int i=0,recv_sum=0;
+  int root=0;
+  int rank = smpi_process_index();
+
+  if(action[3+comm_size])
+    MPI_CURRENT_TYPE=decode_datatype(action[3+comm_size]);
+  else
+    MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+
+  for(i=0;i<comm_size;i++)
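
Note: the trace-line formats documented in the comments above drive the replay mechanism; each line of the trace file is split into the action[] array that the action_* handlers receive. The following standalone sketch is not part of the patch: the strtok() loop is a hypothetical stand-in for SimGrid's actual trace reader, and only the field layout is taken from the gather comment in the diff. It shows how the sample line "0 gather 68 68 0 0 0" maps onto the indices that action_gather() reads.

/* Minimal sketch, assuming whitespace-separated fields as documented in
 * the gather comment above. The tokenizer is a stand-in for SimGrid's
 * trace reader; only the field positions come from the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  char line[] = "0 gather 68 68 0 0 0";  /* sample trace line from the comment */
  char *action[8] = {0};
  int n = 0;
  for (char *tok = strtok(line, " "); tok != NULL && n < 8; tok = strtok(NULL, " "))
    action[n++] = tok;                   /* action[0]=rank, action[1]=action name */

  int send_size = atoi(action[2]);       /* 68: elements each rank sends     */
  int recv_size = atoi(action[3]);       /* 68: elements received per rank   */
  int root      = atoi(action[4]);       /* 0: root of the gather            */
  /* action[5] and action[6] are the send/recv datatype ids that the patch
   * resolves through decode_datatype(); missing fields fall back to
   * MPI_DEFAULT_TYPE, as in the if(action[5]) test in action_gather(). */
  printf("gather: send=%d recv=%d root=%d dtypes=(%s,%s)\n",
         send_size, recv_size, root, action[5], action[6]);
  return 0;
}

This layout also makes visible why action_gather() reallocates recv with recv_size*comm_size on the root only: MPI_Gather semantics require the root, and no other rank, to provide room for recv_size elements from every process.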