AND — Algorithmique Numérique Distribuée

Public GIT Repository
Merge branch 'master' of git+ssh://scm.gforge.inria.fr//gitroot/simgrid/simgrid
[simgrid.git] / src / smpi / colls / smpi_automatic_selector.c
#include "colls_private.h"
#include "mc/mc_private.h"
#include <limits.h>
#include <stdio.h>
4
/* Quick autotuning wrapper for a collective: runs every registered
 * implementation of the collective category `cat` (except the "automatic"
 * and "default" meta-entries), times each one, and reports both the local
 * (rank 0) winner and the global winner (smallest worst-rank time, i.e.
 * min over algorithms of the MPI_MAX-reduced duration).
 *
 * Parameters of the macro:
 *   cat   - collective category name (allgather, bcast, ...), used to index
 *           the mpi_coll_<cat>_description[] table
 *   ret   - return type of the generated function
 *   args  - parenthesized parameter list of the collective signature
 *   args2 - parenthesized argument list used to forward the call
 *
 * Returns MPI_SUCCESS if at least one implementation ran, MPI_ERR_INTERN
 * otherwise. The benchmark itself is reported through XBT_WARN.
 */
#define AUTOMATIC_COLL_BENCH(cat, ret, args, args2)\
    ret smpi_coll_tuned_ ## cat ## _ ## automatic(COLL_UNPAREN args)\
{\
  double time1, time2, time_min=INT_MAX;\
  int min_coll=-1, global_coll=-1;\
  int i;\
  double buf_in, buf_out, max_min=INT_MAX;\
  for (i = 0; mpi_coll_##cat##_description[i].name; i++){\
      /* skip the meta-implementations: calling them would recurse */\
      if(!strcmp(mpi_coll_##cat##_description[i].name, "automatic"))continue;\
      if(!strcmp(mpi_coll_##cat##_description[i].name, "default"))continue;\
      /* synchronize before timing so a straggler does not pollute the measure */\
      smpi_mpi_barrier(comm);\
      if (TRACE_is_enabled()){\
        type_t type = PJ_type_get_or_null (#cat, PJ_type_get_root());\
         if (!type){\
             type=PJ_type_event_new(#cat, PJ_type_get_root());\
         }\
         /* "rank-" + int fits well within 25 chars; stack buffer instead of a
          * leaked malloc (the old code allocated 25*sizeof(char*) and never freed) */\
         char cont_name[25];\
         snprintf(cont_name, sizeof cont_name, "rank-%d", smpi_process_index());\
         val_t value = PJ_value_get_or_new(mpi_coll_##cat##_description[i].name,"1.0 1.0 1.0", type);\
         new_pajeNewEvent (SIMIX_get_clock(), PJ_container_get(cont_name), type, value);\
      }\
      time1 = SIMIX_get_clock();\
      ((int (*) args)\
          mpi_coll_##cat##_description[i].coll) args2 ;\
      time2 = SIMIX_get_clock();\
      buf_out=time2-time1;\
      /* worst duration across ranks, gathered on rank 0 */\
      smpi_mpi_reduce((void*)&buf_out,(void*)&buf_in, 1, MPI_DOUBLE, MPI_MAX, 0,comm );\
      if(time2-time1<time_min){\
          min_coll=i;\
          time_min=time2-time1;\
      }\
      if(smpi_comm_rank(comm)==0){\
          if(buf_in<max_min){\
              max_min=buf_in;\
              global_coll=i;\
          }\
      }\
  }\
  /* only report when something actually ran: avoids description[-1] access */\
  if(min_coll!=-1){\
    if(smpi_comm_rank(comm)==0){\
        XBT_WARN("For rank 0, the quickest was %s : %lf , but global was %s : %lf at max",mpi_coll_##cat##_description[min_coll].name, time_min,mpi_coll_##cat##_description[global_coll].name, max_min);\
    }else\
    XBT_WARN("The quickest " #cat " was %s on rank %d and took %lf",mpi_coll_##cat##_description[min_coll].name, smpi_comm_rank(comm), time_min);\
  }\
  return (min_coll!=-1)?MPI_SUCCESS:MPI_ERR_INTERN;\
}\

51
52
/* Instantiate the "automatic" benchmarking wrapper for every collective
 * category. Each COLL_APPLY expands AUTOMATIC_COLL_BENCH with the
 * category's signature macro (COLL_*_SIG, declared in colls.h) and the
 * argument list used to forward the call to each candidate algorithm. */
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLGATHERV_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_disps, recv_type, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLREDUCE_SIG, (sbuf, rbuf, rcount, dtype, op, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_GATHER_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_type, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLGATHER_SIG, (send_buff,send_count,send_type,recv_buff,recv_count,recv_type,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLTOALL_SIG,(send_buff, send_count, send_type, recv_buff, recv_count, recv_type,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLTOALLV_SIG, (send_buff, send_counts, send_disps, send_type, recv_buff, recv_counts, recv_disps, recv_type, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_BCAST_SIG , (buf, count, datatype, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_REDUCE_SIG,(buf,rbuf, count, datatype, op, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_REDUCE_SCATTER_SIG ,(sbuf,rbuf, rcounts,dtype,op,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_SCATTER_SIG ,(sendbuf, sendcount, sendtype,recvbuf, recvcount, recvtype,root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_BARRIER_SIG,(comm));