Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
Don't include mc_private.h when !HAVE_MC.
[simgrid.git] / src / smpi / colls / smpi_automatic_selector.c
1 #include "colls_private.h"
2 #ifdef HAVE_MC
3 #include "mc/mc_private.h"
4 #endif
5 #include <float.h>
6
7 //attempt to do a quick autotuning version of the collective,
8
/* Emit a paje trace event tagging which implementation of the collective is
 * currently under benchmark (one event per rank, per tried algorithm).
 * NOTE: this macro is expanded WITHOUT a trailing semicolon inside
 * AUTOMATIC_COLL_BENCH, so it must remain a bare if-block — do NOT wrap it
 * in do{...}while(0). It also relies on the loop index `i` being in scope
 * at the expansion site. */
#ifdef HAVE_TRACING
#define TRACE_AUTO_COLL(cat) if (TRACE_is_enabled()){\
        type_t type = PJ_type_get_or_null (#cat, PJ_type_get_root());\
         if (!type){\
             type=PJ_type_event_new(#cat, PJ_type_get_root());\
         }\
         char cont_name[25];\
         /* snprintf instead of sprintf: bound the write to the buffer */\
         snprintf(cont_name, sizeof cont_name, "rank-%d", smpi_process_index());\
         val_t value = PJ_value_get_or_new(mpi_coll_##cat##_description[i].name,"1.0 1.0 1.0", type);\
         new_pajeNewEvent (SIMIX_get_clock(), PJ_container_get(cont_name), type, value);\
      }
#else
#define TRACE_AUTO_COLL(cat)
#endif
23
24
/* Generate an "automatic" variant of collective `cat`: benchmark every
 * registered implementation (skipping "automatic" itself and "default"),
 * time each one with a barrier before it, and report both the local winner
 * (fastest on this rank) and the global winner (smallest max-over-ranks
 * duration, computed via an MPI_MAX reduce to rank 0).
 * Implementations that raise an exception are skipped.
 * Returns MPI_SUCCESS if at least one implementation completed,
 * MPI_ERR_INTERN otherwise.
 * BUGFIX: the warning messages are now guarded on min_coll != -1; previously,
 * if every implementation threw, the XBT_WARN lines indexed
 * mpi_coll_##cat##_description[-1] — an out-of-bounds read. */
#define AUTOMATIC_COLL_BENCH(cat, ret, args, args2)\
    ret smpi_coll_tuned_ ## cat ## _ ## automatic(COLL_UNPAREN args)\
{\
  double time1, time2, time_min=DBL_MAX;\
  volatile int min_coll=-1, global_coll=-1;\
  volatile int i;\
  xbt_ex_t ex;\
  double buf_in, buf_out, max_min=DBL_MAX;\
  for (i = 0; mpi_coll_##cat##_description[i].name; i++){\
      if(!strcmp(mpi_coll_##cat##_description[i].name, "automatic"))continue;\
      if(!strcmp(mpi_coll_##cat##_description[i].name, "default"))continue;\
      smpi_mpi_barrier(comm);\
      TRACE_AUTO_COLL(cat)\
      time1 = SIMIX_get_clock();\
      TRY{\
      ((int (*) args)\
          mpi_coll_##cat##_description[i].coll) args2 ;\
      }\
      CATCH(ex) {\
        /* this implementation cannot handle these arguments: skip it */\
        xbt_ex_free(ex);\
        continue;\
      }\
      time2 = SIMIX_get_clock();\
      buf_out=time2-time1;\
      /* rank 0 collects the slowest rank's duration for this algorithm */\
      smpi_mpi_reduce((void*)&buf_out,(void*)&buf_in, 1, MPI_DOUBLE, MPI_MAX, 0,comm );\
      if(time2-time1<time_min){\
          min_coll=i;\
          time_min=time2-time1;\
      }\
      if(smpi_comm_rank(comm)==0){\
          if(buf_in<max_min){\
              max_min=buf_in;\
              global_coll=i;\
          }\
      }\
  }\
  if(min_coll!=-1){\
      if(smpi_comm_rank(comm)==0){\
          XBT_WARN("For rank 0, the quickest was %s : %f , but global was %s : %f at max",mpi_coll_##cat##_description[min_coll].name, time_min,mpi_coll_##cat##_description[global_coll].name, max_min);\
      }else\
          XBT_WARN("The quickest %s was %s on rank %d and took %f",#cat,mpi_coll_##cat##_description[min_coll].name, smpi_comm_rank(comm), time_min);\
  }else{\
      XBT_WARN("No %s implementation completed successfully on rank %d",#cat, smpi_comm_rank(comm));\
  }\
  return (min_coll!=-1)?MPI_SUCCESS:MPI_ERR_INTERN;\
}
67
68
/* Instantiate the "automatic" benchmark-and-select variant for every SMPI
 * collective: each COLL_APPLY expands AUTOMATIC_COLL_BENCH with that
 * collective's signature macro and the matching call-argument list. */
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLGATHERV_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_disps, recv_type, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLREDUCE_SIG, (sbuf, rbuf, rcount, dtype, op, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_GATHER_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_type, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLGATHER_SIG, (send_buff,send_count,send_type,recv_buff,recv_count,recv_type,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLTOALL_SIG,(send_buff, send_count, send_type, recv_buff, recv_count, recv_type,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_ALLTOALLV_SIG, (send_buff, send_counts, send_disps, send_type, recv_buff, recv_counts, recv_disps, recv_type, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_BCAST_SIG , (buf, count, datatype, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_REDUCE_SIG,(buf,rbuf, count, datatype, op, root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_REDUCE_SCATTER_SIG ,(sbuf,rbuf, rcounts,dtype,op,comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_SCATTER_SIG ,(sendbuf, sendcount, sendtype,recvbuf, recvcount, recvtype,root, comm));
COLL_APPLY(AUTOMATIC_COLL_BENCH, COLL_BARRIER_SIG,(comm));