X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/97b87e07b86d73c2e40abdf13cf8f10e2729f985..bdf8656075bd24c0a5ea802acce0272e42b8ce25:/src/smpi/smpi_replay.c

diff --git a/src/smpi/smpi_replay.c b/src/smpi/smpi_replay.c
index c2002b545a..4507d5286b 100644
--- a/src/smpi/smpi_replay.c
+++ b/src/smpi/smpi_replay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2010, 2011, 2012. The SimGrid Team.
+/* Copyright (c) 2009-2013. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -16,7 +16,8 @@
 int communicator_size = 0;
 static int active_processes = 0;
 xbt_dynar_t *reqq = NULL;
-MPI_Datatype MPI_DEFAULT_TYPE, MPI_CURRENT_TYPE;
+MPI_Datatype MPI_DEFAULT_TYPE;
+MPI_Datatype MPI_CURRENT_TYPE;
 
 static void log_timed_action (const char *const *action, double clock){
   if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
@@ -27,7 +28,6 @@ static void log_timed_action (const char *const *action, double clock){
 }
 
 typedef struct {
-  xbt_dynar_t isends; /* of MPI_Request */
   xbt_dynar_t irecvs; /* of MPI_Request */
 } s_smpi_replay_globals_t, *smpi_replay_globals_t;
 
@@ -46,7 +46,7 @@ static double parse_double(const char *string)
 static MPI_Datatype decode_datatype(const char *const action)
 {
   // Declared datatypes,
-
+
   switch(atoi(action))
   {
     case 0:
@@ -72,22 +72,48 @@ static MPI_Datatype decode_datatype(const char *const action)
       break;
     default:
       MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
-
+
   }
   return MPI_CURRENT_TYPE;
 }
 
+
+const char* encode_datatype(MPI_Datatype datatype)
+{
+
+  //default type for output is set to MPI_BYTE
+  // MPI_DEFAULT_TYPE is not set for output, use directly MPI_BYTE
+  if (datatype==MPI_BYTE){
+      return "";
+  }
+  if(datatype==MPI_DOUBLE)
+      return "0";
+  if(datatype==MPI_INT)
+      return "1";
+  if(datatype==MPI_CHAR)
+      return "2";
+  if(datatype==MPI_SHORT)
+      return "3";
+  if(datatype==MPI_LONG)
+      return "4";
+  if(datatype==MPI_FLOAT)
+      return "5";
+
+  // default - not implemented.
+  // do not warn here as we pass in this function even for other trace formats
+  return "-1";
+}
+
 static void action_init(const char *const *action)
 {
   int i;
   XBT_DEBUG("Initialize the counters");
   smpi_replay_globals_t globals = xbt_new(s_smpi_replay_globals_t, 1);
-  globals->isends = xbt_dynar_new(sizeof(MPI_Request),NULL);
   globals->irecvs = xbt_dynar_new(sizeof(MPI_Request),NULL);
 
   if(action[2]) MPI_DEFAULT_TYPE= MPI_DOUBLE; // default MPE dataype
   else MPI_DEFAULT_TYPE= MPI_BYTE; // default TAU datatype
-
+
   smpi_process_set_user_data((void*) globals);
 
   /* start a simulated timer */
@@ -97,7 +123,7 @@ static void action_init(const char *const *action)
 
   if (!reqq) {
     reqq=xbt_new0(xbt_dynar_t,active_processes);
-
+
     for(i=0;i<active_processes;i++){
-         xbt_dynar_length(globals->isends),xbt_dynar_length(globals->irecvs));
-    xbt_dynar_free_container(&(globals->isends));
+    XBT_DEBUG("There are %lu irecvs in the dynar",
+         xbt_dynar_length(globals->irecvs));
     xbt_dynar_free_container(&(globals->irecvs));
   }
   free(globals);
@@ -142,7 +167,18 @@ static void action_comm_dup(const char *const *action)
 static void action_compute(const char *const *action)
 {
   double clock = smpi_process_simulated_elapsed();
-  smpi_execute_flops(parse_double(action[2]));
+  double flops= parse_double(action[2]);
+#ifdef HAVE_TRACING
+  int rank = smpi_comm_rank(MPI_COMM_WORLD);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type=TRACING_COMPUTING;
+  extra->comp_size=flops;
+  TRACE_smpi_computing_in(rank, extra);
+#endif
+  smpi_execute_flops(flops);
+#ifdef HAVE_TRACING
+  TRACE_smpi_computing_out(rank);
+#endif
 
   log_timed_action (action, clock);
 }
@@ -158,13 +194,19 @@ static void action_send(const char *const *action)
   } else {
     MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
   }
-
+
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
+
   int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
-  TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
-  TRACE_smpi_send(rank, rank, dst_traced);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_SEND;
+  extra->send_size = size;
+  extra->src = rank;
+  extra->dst = dst_traced;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+  TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
 #endif
 
   smpi_mpi_send(NULL, size, MPI_CURRENT_TYPE, to , 0, MPI_COMM_WORLD);
@@ -173,7 +215,6 @@ static void action_send(const char *const *action)
 
 #ifdef HAVE_TRACING
   TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
 }
@@ -188,25 +229,26 @@ static void action_Isend(const char *const *action)
   if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
   else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-  smpi_replay_globals_t globals =
-     (smpi_replay_globals_t) smpi_process_get_user_data();
 
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
   int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to);
-  TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__);
-  TRACE_smpi_send(rank, rank, dst_traced);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_ISEND;
+  extra->send_size = size;
+  extra->src = rank;
+  extra->dst = dst_traced;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
+  TRACE_smpi_send(rank, rank, dst_traced, size*smpi_datatype_size(MPI_CURRENT_TYPE));
 #endif
 
   request = smpi_mpi_isend(NULL, size, MPI_CURRENT_TYPE, to, 0,MPI_COMM_WORLD);
-
+
 #ifdef HAVE_TRACING
   TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
   request->send = 1;
-  TRACE_smpi_computing_in(rank);
 #endif
 
-  xbt_dynar_push(globals->isends,&request);
   xbt_dynar_push(reqq[smpi_comm_rank(MPI_COMM_WORLD)],&request);
 
   log_timed_action (action, clock);
@@ -220,13 +262,18 @@ static void action_recv(const char *const *action) {
   if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
   else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
   int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
-  TRACE_smpi_computing_out(rank);
-  TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_RECV;
+  extra->send_size = size;
+  extra->src = src_traced;
+  extra->dst = rank;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
 #endif
 
   smpi_mpi_recv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD, &status);
@@ -234,7 +281,6 @@ static void action_recv(const char *const *action) {
 #ifdef HAVE_TRACING
   TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
   TRACE_smpi_recv(rank, src_traced, rank);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -249,18 +295,24 @@ static void action_Irecv(const char *const *action)
   smpi_replay_globals_t globals =
      (smpi_replay_globals_t) smpi_process_get_user_data();
-
+
   if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
   else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
 
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
   int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from);
-  TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_IRECV;
+  extra->send_size = size;
+  extra->src = src_traced;
+  extra->dst = rank;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
 #endif
 
   request = smpi_mpi_irecv(NULL, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
-
+
 #ifdef HAVE_TRACING
   TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
   request->recv = 1;
@@ -286,13 +338,14 @@ static void action_wait(const char *const *action){
   int rank = request && request->comm != MPI_COMM_NULL ? smpi_comm_rank(request->comm) : -1;
-  TRACE_smpi_computing_out(rank);
 
   MPI_Group group = smpi_comm_group(request->comm);
   int src_traced = smpi_group_rank(group, request->src);
   int dst_traced = smpi_group_rank(group, request->dst);
   int is_wait_for_receive = request->recv;
-  TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_WAIT;
+  TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
 #endif
   smpi_mpi_wait(&request, &status);
 #ifdef HAVE_TRACING
@@ -300,7 +353,6 @@ static void action_wait(const char *const *action){
   if (is_wait_for_receive) {
     TRACE_smpi_recv(rank, src_traced, dst_traced);
   }
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -316,14 +368,14 @@ static void action_waitall(const char *const *action){
   if (count_requests>0) {
     MPI_Request requests[count_requests];
     MPI_Status status[count_requests];
-
+
     /* The reqq is an array of dynars. Its index corresponds to the rank.
       Thus each rank saves its own requests to the array request. */
     xbt_dynar_foreach(reqq[smpi_comm_rank(MPI_COMM_WORLD)],i,requests[i]);
-
+
   #ifdef HAVE_TRACING
   //save information from requests
-
+
   xbt_dynar_t srcs = xbt_dynar_new(sizeof(int), NULL);
   xbt_dynar_t dsts = xbt_dynar_new(sizeof(int), NULL);
   xbt_dynar_t recvs = xbt_dynar_new(sizeof(int), NULL);
@@ -350,10 +402,11 @@ static void action_waitall(const char *const *action){
     }
   }
   int rank_traced = smpi_process_index();
-  TRACE_smpi_computing_out(rank_traced);
-
-  TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__);
-  #endif
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_WAITALL;
+  extra->send_size=count_requests;
+  TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
+  #endif
 
   smpi_mpi_waitall(count_requests, requests, status);
 
@@ -372,9 +425,8 @@ static void action_waitall(const char *const *action){
   xbt_dynar_free(&srcs);
   xbt_dynar_free(&dsts);
   xbt_dynar_free(&recvs);
-  TRACE_smpi_computing_in(rank_traced);
 #endif
-
+
   xbt_dynar_reset(reqq[smpi_comm_rank(MPI_COMM_WORLD)]);
   }
   log_timed_action (action, clock);
@@ -384,13 +436,13 @@ static void action_barrier(const char *const *action){
   double clock = smpi_process_simulated_elapsed();
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
-  TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_BARRIER;
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
 #endif
   smpi_mpi_barrier(MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -414,18 +466,23 @@ static void action_bcast(const char *const *action)
       MPI_CURRENT_TYPE=decode_datatype(action[4]);
     }
   }
-
+
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
-  int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
-  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+  int root_traced = smpi_group_index(smpi_comm_group(MPI_COMM_WORLD), root);
+
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_BCAST;
+  extra->send_size = size;
+  extra->root = root_traced;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
+
 #endif
-  smpi_mpi_bcast(NULL, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
+  mpi_coll_bcast_fun(NULL, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -438,25 +495,30 @@ static void action_reduce(const char *const *action)
   double clock = smpi_process_simulated_elapsed();
   int root=0;
   MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
   if(action[4]) {
-    root= atoi(action[4]);
-    if(action[5]) {
-      MPI_CURRENT_TYPE=decode_datatype(action[5]);
-    }
+      root= atoi(action[4]);
+      if(action[5]) {
+        MPI_CURRENT_TYPE=decode_datatype(action[5]);
+      }
   }
 
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
-  int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), 0);
-  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__);
+  int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), root);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_REDUCE;
+  extra->send_size = comm_size;
+  extra->comp_size = comp_size;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->root = root_traced;
+
+  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
 #endif
   mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
   smpi_execute_flops(comp_size);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -465,22 +527,26 @@ static void action_allReduce(const char *const *action) {
   double comm_size = parse_double(action[2]);
   double comp_size = parse_double(action[3]);
-
+
   if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
   else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
-
+
   double clock = smpi_process_simulated_elapsed();
 #ifdef HAVE_TRACING
   int rank = smpi_comm_rank(MPI_COMM_WORLD);
-  TRACE_smpi_computing_out(rank);
-  TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_ALLREDUCE;
+  extra->send_size = comm_size;
+  extra->comp_size = comp_size;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 #endif
-  mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
+  mpi_coll_reduce_fun(NULL, NULL, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, 0, MPI_COMM_WORLD);
   smpi_execute_flops(comp_size);
-  mpi_coll_bcast_fun(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
+  mpi_coll_bcast_fun(NULL, comm_size, MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -491,23 +557,35 @@ static void action_allToAll(const char *const *action) {
   int comm_size = smpi_comm_size(MPI_COMM_WORLD);
   int send_size = parse_double(action[2]);
   int recv_size = parse_double(action[3]);
-  void *send = xbt_new0(int, send_size*comm_size);
-  void *recv = xbt_new0(int, send_size*comm_size);
-
-  if(action[4]) MPI_CURRENT_TYPE=decode_datatype(action[4]);
-  else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+  MPI_Datatype MPI_CURRENT_TYPE2;
+
+  if(action[4]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[4]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[5]);
+  }
+  else {
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+  }
+  void *send = calloc(send_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recv = calloc(recv_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
 
 #ifdef HAVE_TRACING
   int rank = smpi_process_index();
-  TRACE_smpi_computing_out(rank);
-  TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_ALLTOALL;
+  extra->send_size = send_size;
+  extra->recv_size = recv_size;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 #endif
-
-  mpi_coll_alltoall_fun(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE, MPI_COMM_WORLD);
+
+  mpi_coll_alltoall_fun(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
 
   log_timed_action (action, clock);
@@ -515,62 +593,342 @@ static void action_allToAll(const char *const *action) {
   xbt_free(recv);
 }
 
+
+static void action_gather(const char *const *action) {
+  /*
+ The structure of the gather action for the rank 0 (total 4 processes)
+ is the following:
+ 0 gather 68 68 0 0 0
+
+  where:
+  1) 68 is the sendcounts
+  2) 68 is the recvcounts
+  3) 0 is the root node
+  4) 0 is the send datatype id, see decode_datatype()
+  5) 0 is the recv datatype id, see decode_datatype()
+  */
+  double clock = smpi_process_simulated_elapsed();
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int send_size = parse_double(action[2]);
+  int recv_size = parse_double(action[3]);
+  MPI_Datatype MPI_CURRENT_TYPE2;
+  if(action[5]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[5]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[6]);
+  } else {
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+  }
+  void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recv = NULL;
+
+  int root=atoi(action[4]);
+  int rank = smpi_process_index();
+
+  if(rank==root)
+    recv = calloc(recv_size*comm_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+#ifdef HAVE_TRACING
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_GATHER;
+  extra->send_size = send_size;
+  extra->recv_size = recv_size;
+  extra->root = root;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+  TRACE_smpi_collective_in(rank, root, __FUNCTION__, extra);
+#endif
+smpi_mpi_gather(send, send_size, MPI_CURRENT_TYPE,
+                recv, recv_size, MPI_CURRENT_TYPE2,
+                root, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
+  log_timed_action (action, clock);
+  xbt_free(send);
+  xbt_free(recv);
+}
+
+
+
+static void action_gatherv(const char *const *action) {
+  /*
+ The structure of the gatherv action for the rank 0 (total 4 processes)
+ is the following:
+ 0 gather 68 68 10 10 10 0 0 0
+
+  where:
+  1) 68 is the sendcount
+  2) 68 10 10 10 is the recvcounts
+  3) 0 is the root node
+  4) 0 is the send datatype id, see decode_datatype()
+  5) 0 is the recv datatype id, see decode_datatype()
+  */
+  double clock = smpi_process_simulated_elapsed();
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int send_size = parse_double(action[2]);
+  int *disps = xbt_new0(int, comm_size);
+  int *recvcounts = xbt_new0(int, comm_size);
+  int i=0,recv_sum=0;
+
+  MPI_Datatype MPI_CURRENT_TYPE2;
+  if(action[4+comm_size]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[4+comm_size]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[5+comm_size]);
+  } else {
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+  }
+  void *send = calloc(send_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recv = NULL;
+  for(i=0;i<comm_size;i++) {
+    recvcounts[i] = atoi(action[i+3]);
+    recv_sum=recv_sum+recvcounts[i];
+  }
+
+  int root=atoi(action[3+comm_size]);
+  int rank = smpi_process_index();
+
+  if(rank==root)
+    recv = calloc(recv_sum, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+#ifdef HAVE_TRACING
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_GATHERV;
+  extra->send_size = send_size;
+  extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+  for(i=0; i< comm_size; i++)//copy data to avoid bad free
+    extra->recvcounts[i] = recvcounts[i];
+  extra->root = root;
+  extra->num_processes = comm_size;
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+  TRACE_smpi_collective_in(rank, root, __FUNCTION__, extra);
+#endif
+smpi_mpi_gatherv(send, send_size, MPI_CURRENT_TYPE,
+                recv, recvcounts, disps, MPI_CURRENT_TYPE2,
+                root, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
+  log_timed_action (action, clock);
+  xbt_free(send);
+  xbt_free(recv);
+}
+
+static void action_reducescatter(const char *const *action) {
+
+  /*
+ The structure of the reducescatter action for the rank 0 (total 4 processes)
+ is the following:
+0 reduceScatter 275427 275427 275427 204020 11346849 0
+
+  where:
+  1) The first four values after the name of the action declare the recvcounts array
+  2) The value 11346849 is the amount of instructions
+  3) The last value corresponds to the datatype, see decode_datatype().
+
+  We analyze a MPI_Reduce_scatter call to one MPI_Reduce and one MPI_Scatterv.
+
+  */
+
+  double clock = smpi_process_simulated_elapsed();
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int comp_size = parse_double(action[2+comm_size]);
+  int *recvcounts = xbt_new0(int, comm_size);
+  int *disps = xbt_new0(int, comm_size);
+  int i=0,recv_sum=0;
+  int root=0;
+  int rank = smpi_process_index();
+
+  if(action[3+comm_size])
+    MPI_CURRENT_TYPE=decode_datatype(action[3+comm_size]);
+  else
+    MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+
+  for(i=0;i<comm_size;i++) {
+    recvcounts[i] = atoi(action[i+2]);
+    recv_sum=recv_sum+recvcounts[i];
+  }
+
+#ifdef HAVE_TRACING
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_REDUCE_SCATTER;
+  extra->send_size = 0;
+  extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+  for(i=0; i< comm_size; i++)//copy data to avoid bad free
+    extra->recvcounts[i] = recvcounts[i];
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->comp_size = comp_size;
+  extra->num_processes = comm_size;
+
+
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
+#endif
+  mpi_coll_reduce_fun(NULL, NULL, recv_sum, MPI_CURRENT_TYPE, MPI_OP_NULL,
+                      root, MPI_COMM_WORLD);
+  smpi_mpi_scatterv(NULL, recvcounts, disps, MPI_CURRENT_TYPE, NULL,
+                    recvcounts[rank], MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
+  smpi_execute_flops(comp_size);
+
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
+  log_timed_action (action, clock);
+}
+
+
+static void action_allgatherv(const char *const *action) {
+
+  /*
+ The structure of the allgatherv action for the rank 0 (total 4 processes)
+ is the following:
+0 allGatherV 275427 275427 275427 275427 204020
+
+  where:
+  1) 275427 is the sendcount
+  2) The next four elements declare the recvcounts array
+  3) No more values mean that the datatype for sent and receive buffer
+     is the default one, see decode_datatype().
+
+  */
+
+  double clock = smpi_process_simulated_elapsed();
+
+  int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+  int i=0;
+  int sendcount=atoi(action[2]);
+  int *recvcounts = xbt_new0(int, comm_size);
+  int *disps = xbt_new0(int, comm_size);
+  int recv_sum=0;
+  MPI_Datatype MPI_CURRENT_TYPE2;
+
+  if(action[3+comm_size]) {
+    MPI_CURRENT_TYPE = decode_datatype(action[3+comm_size]);
+    MPI_CURRENT_TYPE2 = decode_datatype(action[4+comm_size]);
+  } else {
+    MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
+  }
+  void *sendbuf = calloc(sendcount, smpi_datatype_size(MPI_CURRENT_TYPE));
+
+  for(i=0;i<comm_size;i++) {
+    recvcounts[i] = atoi(action[i+3]);
+    recv_sum=recv_sum+recvcounts[i];
+  }
+  void *recvbuf = calloc(recv_sum, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+  int rank = smpi_process_index();
+
+#ifdef HAVE_TRACING
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_ALLGATHERV;
+  extra->send_size = sendcount;
+  extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+  for(i=0; i< comm_size; i++)//copy data to avoid bad free
+    extra->recvcounts[i] = recvcounts[i];
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+  extra->num_processes = comm_size;
+
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
+#endif
+
+mpi_coll_allgatherv_fun(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcounts, disps, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
+  log_timed_action (action, clock);
+  xbt_free(sendbuf);
+  xbt_free(recvbuf);
+  xbt_free(recvcounts);
+  xbt_free(disps);
+}
+
+
 static void action_allToAllv(const char *const *action) {
   /*
  The structure of the allToAllV action for the rank 0 (total 4 processes)
  is the following:
-  0 allToAllV 100 1 7 10 12 5 10 20 45 100 1 70 10 5 1 5 77 90
+  0 allToAllV 100 1 7 10 12 100 1 70 10 5
 
   where:
   1) 100 is the size of the send buffer *sizeof(int),
   2) 1 7 10 12 is the sendcounts array
-  3) 5 10 20 45 is the sdispls array
-  4) 100*sizeof(int) is the size of the receiver buffer
-  5) 1 70 10 5 is the recvcounts array
-  6) 1 5 77 90 is the rdispls array
-
+  3) 100*sizeof(int) is the size of the receiver buffer
+  4) 1 70 10 5 is the recvcounts array
+
   */
-
-
+
+
   double clock = smpi_process_simulated_elapsed();
-
+
   int comm_size = smpi_comm_size(MPI_COMM_WORLD);
   int send_buf_size=0,recv_buf_size=0,i=0;
   int *sendcounts = xbt_new0(int, comm_size);
   int *recvcounts = xbt_new0(int, comm_size);
   int *senddisps = xbt_new0(int, comm_size);
   int *recvdisps = xbt_new0(int, comm_size);
-
-  send_buf_size=parse_double(action[2]);
-  recv_buf_size=parse_double(action[3+2*comm_size]);
-  int *sendbuf = xbt_new0(int, send_buf_size);
-  int *recvbuf = xbt_new0(int, recv_buf_size);
+  MPI_Datatype MPI_CURRENT_TYPE2;
 
-  if(action[4+4*comm_size]) MPI_CURRENT_TYPE=decode_datatype(action[4+4*comm_size]);
-  else MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+  send_buf_size=parse_double(action[2]);
+  recv_buf_size=parse_double(action[3+comm_size]);
+  if(action[4+2*comm_size]) {
+    MPI_CURRENT_TYPE=decode_datatype(action[4+2*comm_size]);
+    MPI_CURRENT_TYPE2=decode_datatype(action[5+2*comm_size]);
+  }
+  else {
+    MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
+    MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
+  }
+
+  void *sendbuf = calloc(send_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE));
+  void *recvbuf = calloc(recv_buf_size, smpi_datatype_size(MPI_CURRENT_TYPE2));
 
   for(i=0;i<comm_size;i++) {
+    sendcounts[i] = atoi(action[i+3]);
+    recvcounts[i] = atoi(action[i+4+comm_size]);
+  }
 
 #ifdef HAVE_TRACING
   int rank = smpi_process_index();
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_ALLTOALLV;
+  extra->recvcounts= xbt_malloc(comm_size*sizeof(int));
+  extra->sendcounts= xbt_malloc(comm_size*sizeof(int));
+  extra->num_processes = comm_size;
+
+  for(i=0; i< comm_size; i++){//copy data to avoid bad free
+    extra->send_size += sendcounts[i];
+    extra->sendcounts[i] = sendcounts[i];
+    extra->recv_size += recvcounts[i];
+    extra->recvcounts[i] = recvcounts[i];
+  }
+  extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE);
+  extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2);
+
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 #endif
-  mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
+  mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,
                          recvbuf, recvcounts, recvdisps, MPI_CURRENT_TYPE, MPI_COMM_WORLD);
 #ifdef HAVE_TRACING
   TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
-  TRACE_smpi_computing_in(rank);
 #endif
-
+
   log_timed_action (action, clock);
   xbt_free(sendbuf);
   xbt_free(recvbuf);
@@ -578,12 +936,21 @@ static void action_allToAllv(const char *const *action) {
   xbt_free(recvcounts);
   xbt_free(senddisps);
   xbt_free(recvdisps);
-
-
 }
 
 void smpi_replay_init(int *argc, char***argv){
-  PMPI_Init(argc, argv);
+  smpi_process_init(argc, argv);
+  smpi_process_mark_as_initialized();
+#ifdef HAVE_TRACING
+  int rank = smpi_process_index();
+  TRACE_smpi_init(rank);
+  TRACE_smpi_computing_init(rank);
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_INIT;
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+#endif
+
   if (!smpi_process_index()){
     _xbt_replay_action_init();
     xbt_replay_action_register("init", action_init);
@@ -603,6 +970,10 @@ void smpi_replay_init(int *argc, char***argv){
     xbt_replay_action_register("allReduce", action_allReduce);
     xbt_replay_action_register("allToAll", action_allToAll);
     xbt_replay_action_register("allToAllV", action_allToAllv);
+    xbt_replay_action_register("gather", action_gather);
+    xbt_replay_action_register("gatherV", action_gatherv);
+    xbt_replay_action_register("allGatherV", action_allgatherv);
+    xbt_replay_action_register("reduceScatter", action_reducescatter);
     xbt_replay_action_register("compute", action_compute);
   }
 
@@ -625,5 +996,18 @@ int smpi_replay_finalize(){
     xbt_free(reqq);
     reqq = NULL;
   }
-  return PMPI_Finalize();
+  smpi_mpi_barrier(MPI_COMM_WORLD);
+#ifdef HAVE_TRACING
+  int rank = smpi_process_index();
+  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
+  extra->type = TRACING_FINALIZE;
+  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
+#endif
+  smpi_process_finalize();
+#ifdef HAVE_TRACING
+  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+  TRACE_smpi_finalize(smpi_process_index());
+#endif
+  smpi_process_destroy();
+  return MPI_SUCCESS;
 }
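Side note, not part of the commit above: the pattern for extending this replay engine is visible in smpi_replay_init() — write an action_*() handler and register it with xbt_replay_action_register(). A minimal, hypothetical sketch of that pattern follows; it reuses only helpers already defined in smpi_replay.c (smpi_process_simulated_elapsed, parse_double, smpi_execute_flops, log_timed_action), while the "mycompute" action name and its single trace field are invented for illustration and mirror action_compute() above.

/* Hypothetical handler: replays a trace line of the form "<rank> mycompute <flops>",
 * following the same structure as action_compute(). */
static void action_mycompute(const char *const *action)
{
  double clock = smpi_process_simulated_elapsed(); /* simulated start time */
  double flops = parse_double(action[2]);          /* field 2: amount of work */
  smpi_execute_flops(flops);                       /* consume the simulated flops */
  log_timed_action (action, clock);                /* verbose replay logging */
}

and, next to the other registrations inside smpi_replay_init():

  xbt_replay_action_register("mycompute", action_mycompute);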