+
+static void action_reducescatter(const char *const *action) {
+
+ /*
+ The structure of the reducescatter action for the rank 0 (total 4 processes)
+ is the following:
+0 reduceScatter 275427 275427 275427 204020 11346849 0
+
+ where:
+ 1) The first four values after the name of the action declare the recvcounts array
+ 2) The value 11346849 is the amount of instructions
+ 3) The last value corresponds to the datatype, see decode_datatype().
+
+ We analyze a MPI_Reduce_scatter call to one MPI_Reduce and one MPI_Scatterv.
+
+ */
+
+ double clock = smpi_process_simulated_elapsed();
+ int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+ int comp_size = parse_double(action[2+comm_size]);
+ int *recvcounts = xbt_new0(int, comm_size);
+ int *disps = xbt_new0(int, comm_size);
+ int i=0,recv_sum=0;
+ int root=0;
+ int rank = smpi_process_index();
+
+ /* The datatype field is optional; fall back to the default type when absent. */
+ if(action[3+comm_size])
+ MPI_CURRENT_TYPE=decode_datatype(action[3+comm_size]);
+ else
+ MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
+
+ /* Parse the per-rank recvcounts; recv_sum is the total element count
+ reduced at the root. Scatterv displacements stay 0 (buffers are NULL
+ in replay mode, so only the counts matter for timing). */
+ for(i=0;i<comm_size;i++) {
+ recvcounts[i] = atoi(action[i+2]);
+ recv_sum=recv_sum+recvcounts[i];
+ disps[i] = 0;
+ }
+
+#ifdef HAVE_TRACING
+ TRACE_smpi_computing_out(rank);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+#endif
+ mpi_coll_reduce_fun(NULL, NULL, recv_sum, MPI_CURRENT_TYPE, MPI_OP_NULL,
+ root, MPI_COMM_WORLD);
+ smpi_mpi_scatterv(NULL, recvcounts, disps, MPI_CURRENT_TYPE, NULL,
+ recvcounts[rank], MPI_CURRENT_TYPE, 0, MPI_COMM_WORLD);
+ smpi_execute_flops(comp_size);
+
+
+#ifdef HAVE_TRACING
+ TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+ TRACE_smpi_computing_in(rank);
+#endif
+
+ /* Fix: recvcounts and disps were leaked on every replayed reducescatter;
+ release them here, matching the cleanup done in action_allgatherv. */
+ xbt_free(recvcounts);
+ xbt_free(disps);
+
+ log_timed_action (action, clock);
+}
+
+
+static void action_allgatherv(const char *const *action) {
+
+ /*
+ Layout of an allgatherv action line for rank 0 (4 processes total):
+0 allGatherV 275427 275427 275427 275427 204020 0 275427 550854 826281
+
+ where:
+ 1) 275427 is the sendcount
+ 2) the next four fields form the recvcounts array
+ 3) the four fields after that form the disps array
+ 4) optional trailing fields select the send/recv datatypes via
+ decode_datatype(); when absent the default datatype is used.
+
+ */
+
+ double clock = smpi_process_simulated_elapsed();
+
+ int comm_size = smpi_comm_size(MPI_COMM_WORLD);
+ int sendcount = atoi(action[2]);
+ int *recvcounts = xbt_new0(int, comm_size);
+ int *disps = xbt_new0(int, comm_size);
+ int total_recv = 0;
+ int idx = 0;
+ MPI_Datatype MPI_CURRENT_TYPE2;
+
+ /* The two datatype fields are optional and appear together after the
+ 2*comm_size count/displacement fields. */
+ if (action[3 + 2 * comm_size]) {
+ MPI_CURRENT_TYPE = decode_datatype(action[3 + 2 * comm_size]);
+ MPI_CURRENT_TYPE2 = decode_datatype(action[4 + 2 * comm_size]);
+ } else {
+ MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
+ MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
+ }
+ void *sendbuf = calloc(sendcount, smpi_datatype_size(MPI_CURRENT_TYPE));
+
+ /* Parse per-rank receive counts and displacements; total_recv sizes the
+ receive buffer. */
+ for (idx = 0; idx < comm_size; idx++) {
+ recvcounts[idx] = atoi(action[idx + 3]);
+ total_recv += recvcounts[idx];
+ disps[idx] = atoi(action[idx + 3 + comm_size]);
+ }
+ void *recvbuf = calloc(total_recv, smpi_datatype_size(MPI_CURRENT_TYPE2));
+
+#ifdef HAVE_TRACING
+ int rank = MPI_COMM_WORLD != MPI_COMM_NULL ? smpi_process_index() : -1;
+ TRACE_smpi_computing_out(rank);
+ TRACE_smpi_collective_in(rank, -1, __FUNCTION__);
+#endif
+
+ mpi_coll_allgatherv_fun(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf,
+ recvcounts, disps, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
+
+#ifdef HAVE_TRACING
+ TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
+ TRACE_smpi_computing_in(rank);
+#endif
+
+ log_timed_action (action, clock);
+ xbt_free(sendbuf);
+ xbt_free(recvbuf);
+ xbt_free(recvcounts);
+ xbt_free(disps);
+}
+
+