X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/03b9cd8063ce997bf46d80291c7e54ec8480ad01..d92bc6ee22885a3c92996b6c9e749e6363bdb6ba:/src/smpi/colls/smpi_openmpi_selector.c

diff --git a/src/smpi/colls/smpi_openmpi_selector.c b/src/smpi/colls/smpi_openmpi_selector.c
index 55f9879e51..49a12c3403 100644
--- a/src/smpi/colls/smpi_openmpi_selector.c
+++ b/src/smpi/colls/smpi_openmpi_selector.c
@@ -1,6 +1,6 @@
 /* selector for collective algorithms based on openmpi's default coll_tuned_decision_fixed selector */
 
-/* Copyright (c) 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -76,12 +76,12 @@ int smpi_coll_tuned_alltoall_ompi( void *sbuf, int scount,
                                               comm);
 
     } else if (block_dsize < 3000) {
-        return smpi_coll_tuned_alltoall_simple(sbuf, scount, sdtype,
+        return smpi_coll_tuned_alltoall_basic_linear(sbuf, scount, sdtype,
                                                    rbuf, rcount, rdtype,
                                                    comm);
     }
 
-    return smpi_coll_tuned_alltoall_pair (sbuf, scount, sdtype,
+    return smpi_coll_tuned_alltoall_ring (sbuf, scount, sdtype,
                                                     rbuf, rcount, rdtype,
                                                     comm);
 }
@@ -94,32 +94,32 @@ int smpi_coll_tuned_alltoallv_ompi(void *sbuf, int *scounts, int *sdisps,
                                               )
 {
     /* For starters, just keep the original algorithm. */
-    return smpi_coll_tuned_alltoallv_bruck(sbuf, scounts, sdisps, sdtype,
+    return smpi_coll_tuned_alltoallv_ompi_basic_linear(sbuf, scounts, sdisps, sdtype,
                                                         rbuf, rcounts, rdisps,rdtype,
                                                         comm);
 }
 
-/*
-void smpi_coll_tuned_barrier_ompi(MPI_Comm comm)
+
+int smpi_coll_tuned_barrier_ompi(MPI_Comm comm)
 {    int communicator_size = smpi_comm_size(comm);
     if( 2 == communicator_size )
-        return smpi_coll_tuned_barrier_intra_two_procs(comm, module);
-     * Basic optimisation. If we have a power of 2 number of nodes
-     * the use the recursive doubling algorithm, otherwise
-     * bruck is the one we want.
+        return smpi_coll_tuned_barrier_ompi_two_procs(comm);
+/*     * Basic optimisation. If we have a power of 2 number of nodes*/
+/*     * the use the recursive doubling algorithm, otherwise*/
+/*     * bruck is the one we want.*/
     {
-        bool has_one = false;
+        int has_one = 0;
         for( ; communicator_size > 0; communicator_size >>= 1 ) {
             if( communicator_size & 0x1 ) {
                 if( has_one )
-                    return smpi_coll_tuned_barrier_intra_bruck(comm, module);
-                has_one = true;
+                    return smpi_coll_tuned_barrier_ompi_bruck(comm);
+                has_one = 1;
             }
         }
     }
 
-    return smpi_coll_tuned_barrier_intra_recursivedoubling(comm, module);
-}*/
+    return smpi_coll_tuned_barrier_ompi_recursivedoubling(comm);
+}
 
 int smpi_coll_tuned_bcast_ompi(void *buff, int count,
                                           MPI_Datatype datatype, int root,
@@ -324,11 +324,11 @@ int smpi_coll_tuned_reduce_ompi( void *sendbuf, void *recvbuf,
 #endif  /* 0 */
 }
 
-/*int smpi_coll_tuned_reduce_scatter_ompi( void *sbuf, void *rbuf,
+int smpi_coll_tuned_reduce_scatter_ompi( void *sbuf, void *rbuf,
                                                     int *rcounts,
                                                     MPI_Datatype dtype,
                                                     MPI_Op  op,
-                                                    MPI_Comm  comm,
+                                                    MPI_Comm  comm
                                                     )
 {
     int comm_size, i, pow2;
@@ -337,25 +337,26 @@ int smpi_coll_tuned_reduce_ompi( void *sendbuf, void *recvbuf,
     const double b = 8.0;
     const size_t small_message_size = 12 * 1024;
     const size_t large_message_size = 256 * 1024;
-    bool zerocounts = false;
-
-    OPAL_OUTPUT((smpi_coll_tuned_stream, "smpi_coll_tuned_reduce_scatter_ompi"));
+    int zerocounts = 0;
+    XBT_DEBUG("smpi_coll_tuned_reduce_scatter_ompi");
+
 
     comm_size = smpi_comm_size(comm);
     // We need data size for decision function
-    ompi_datatype_type_size(dtype, &dsize);
+    dsize=smpi_datatype_size(dtype);
     total_message_size = 0;
     for (i = 0; i < comm_size; i++) {
         total_message_size += rcounts[i];
         if (0 == rcounts[i]) {
-            zerocounts = true;
+            zerocounts = 1;
         }
     }
 
-    if( !ompi_op_is_commute(op) || (zerocounts)) {
-        return smpi_coll_tuned_reduce_scatter_intra_nonoverlapping (sbuf, rbuf, rcounts,
+    if( !smpi_op_is_commute(op) || (zerocounts)) {
+        smpi_mpi_reduce_scatter (sbuf, rbuf, rcounts,
                                                                     dtype, op,
-                                                                    comm, module);
+                                                                    comm);
+        return MPI_SUCCESS;
     }
 
     total_message_size *= dsize;
@@ -367,20 +368,17 @@ int smpi_coll_tuned_reduce_ompi( void *sendbuf, void *recvbuf,
         ((total_message_size <= large_message_size) && (pow2 == comm_size)) ||
         (comm_size >= a * total_message_size + b)) {
         return
-            smpi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(sbuf, rbuf, rcounts,
+            smpi_coll_tuned_reduce_scatter_ompi_basic_recursivehalving(sbuf, rbuf, rcounts,
                                                                         dtype, op,
-                                                                        comm, module);
+                                                                        comm);
     }
 
-    return smpi_coll_tuned_reduce_scatter_intra_ring(sbuf, rbuf, rcounts,
+    return smpi_coll_tuned_reduce_scatter_ompi_ring(sbuf, rbuf, rcounts,
                                                      dtype, op,
-                                                     comm, module);
+                                                     comm);
+
 
-
-    return smpi_coll_tuned_reduce_scatter(sbuf, rbuf, rcounts,
-                                          dtype, op,
-                                          comm;
-}*/
+}
 
 int smpi_coll_tuned_allgather_ompi(void *sbuf, int scount,
                                               MPI_Datatype sdtype,
@@ -426,15 +424,15 @@ int smpi_coll_tuned_allgather_ompi(void *sbuf, int scount,
                                                      comm);
         }
     } else {
-        //if (communicator_size % 2) {
+        if (communicator_size % 2) {
             return smpi_coll_tuned_allgather_ring(sbuf, scount, sdtype,
                                                      rbuf, rcount, rdtype,
                                                      comm);
-        /*} else {
-            return smpi_coll_tuned_allgather_intra_neighborexchange(sbuf, scount, sdtype,
+        } else {
+            return smpi_coll_tuned_allgather_ompi_neighborexchange(sbuf, scount, sdtype,
                                                      rbuf, rcount, rdtype,
-                                                     comm, module);
-        }*/
+                                                     comm);
+        }
     }
 
 #if defined(USE_MPICH2_DECISION)
@@ -448,17 +446,17 @@ int smpi_coll_tuned_allgather_ompi(void *sbuf, int scount,
        - for everything else use ring.
     */
     if ((pow2_size == communicator_size) && (total_dsize < 524288)) {
-        return smpi_coll_tuned_allgather_intra_recursivedoubling(sbuf, scount, sdtype,
+        return smpi_coll_tuned_allgather_rdb(sbuf, scount, sdtype,
                                                                  rbuf, rcount, rdtype,
-                                                                 comm, module);
+                                                                 comm);
     } else if (total_dsize <= 81920) {
-        return smpi_coll_tuned_allgather_intra_bruck(sbuf, scount, sdtype,
+        return smpi_coll_tuned_allgather_bruck(sbuf, scount, sdtype,
                                                      rbuf, rcount, rdtype,
-                                                     comm, module);
+                                                     comm);
     }
-    return smpi_coll_tuned_allgather_intra_ring(sbuf, scount, sdtype,
+    return smpi_coll_tuned_allgather_ring(sbuf, scount, sdtype,
                                                 rbuf, rcount, rdtype,
-                                                comm, module);
+                                                comm);
 #endif  /* defined(USE_MPICH2_DECISION) */
 }
 
@@ -500,30 +498,30 @@ int smpi_coll_tuned_allgatherv_ompi(void *sbuf, int scount,
                                                     comm);
 
     } else {
-//        if (communicator_size % 2) {
+        if (communicator_size % 2) {
             return smpi_coll_tuned_allgatherv_ring(sbuf, scount, sdtype,
                                                     rbuf, rcounts, rdispls, rdtype,
                                                     comm);
-/*        } else {
-            return smpi_coll_tuned_allgatherv_intra_neighborexchange(sbuf, scount, sdtype,
+        } else {
+            return smpi_coll_tuned_allgatherv_ompi_neighborexchange(sbuf, scount, sdtype,
                                                     rbuf, rcounts, rdispls, rdtype,
-                                                    comm, module);
-        }*/
+                                                    comm);
+        }
     }
 }
-/*
+
 int smpi_coll_tuned_gather_ompi(void *sbuf, int scount,
                                       MPI_Datatype sdtype,
                                       void* rbuf, int rcount,
                                       MPI_Datatype rdtype,
                                       int root,
-                                      MPI_Comm  comm,
+                                      MPI_Comm  comm
                                       )
 {
-    const int large_segment_size = 32768;
-    const int small_segment_size = 1024;
+    //const int large_segment_size = 32768;
+    //const int small_segment_size = 1024;
 
-    const size_t large_block_size = 92160;
+    //const size_t large_block_size = 92160;
     const size_t intermediate_block_size = 6000;
     const size_t small_block_size = 1024;
 
@@ -533,52 +531,49 @@ int smpi_coll_tuned_gather_ompi(void *sbuf, int scount,
     int communicator_size, rank;
     size_t dsize, block_size;
 
-    OPAL_OUTPUT((smpi_coll_tuned_stream,
-                 "smpi_coll_tuned_gather_ompi"));
+    XBT_DEBUG("smpi_coll_tuned_gather_ompi");
 
     communicator_size = smpi_comm_size(comm);
-    rank = ompi_comm_rank(comm);
+    rank = smpi_comm_rank(comm);
 
     // Determine block size
     if (rank == root) {
-        ompi_datatype_type_size(rdtype, &dsize);
+        dsize = smpi_datatype_size(rdtype);
         block_size = dsize * rcount;
     } else {
-        ompi_datatype_type_size(sdtype, &dsize);
+        dsize = smpi_datatype_size(sdtype);
        block_size = dsize * scount;
     }
 
-    if (block_size > large_block_size) {
-        return smpi_coll_tuned_gather_intra_linear_sync (sbuf, scount, sdtype,
-                                                         rbuf, rcount, rdtype,
-                                                         root, comm, module,
-                                                         large_segment_size);
+/*    if (block_size > large_block_size) {*/
+/*        return smpi_coll_tuned_gather_ompi_linear_sync (sbuf, scount, sdtype, */
+/*                                                         rbuf, rcount, rdtype, */
+/*                                                         root, comm);*/
 
-    } else if (block_size > intermediate_block_size) {
-        return smpi_coll_tuned_gather_intra_linear_sync (sbuf, scount, sdtype,
+/*    } else*/ if (block_size > intermediate_block_size) {
+        return smpi_coll_tuned_gather_ompi_linear_sync (sbuf, scount, sdtype,
                                                          rbuf, rcount, rdtype,
-                                                         root, comm, module,
-                                                         small_segment_size);
+                                                         root, comm);
 
     } else if ((communicator_size > large_communicator_size) ||
                ((communicator_size > small_communicator_size) &&
                 (block_size < small_block_size))) {
-        return smpi_coll_tuned_gather_intra_binomial (sbuf, scount, sdtype,
+        return smpi_coll_tuned_gather_ompi_binomial (sbuf, scount, sdtype,
                                                       rbuf, rcount, rdtype,
-                                                      root, comm, module);
+                                                      root, comm);
 
     }
     // Otherwise, use basic linear
-    return smpi_coll_tuned_gather_intra_basic_linear (sbuf, scount, sdtype,
+    return smpi_coll_tuned_gather_ompi_basic_linear (sbuf, scount, sdtype,
                                                       rbuf, rcount, rdtype,
-                                                      root, comm, module);
-}*/
-/*
+                                                      root, comm);
+}
+
 int smpi_coll_tuned_scatter_ompi(void *sbuf, int scount,
                                     MPI_Datatype sdtype,
                                     void* rbuf, int rcount,
                                     MPI_Datatype rdtype,
-                                    int root, MPI_Comm comm,
+                                    int root, MPI_Comm comm
                                     )
 {
     const size_t small_block_size = 300;
@@ -586,28 +581,36 @@ int smpi_coll_tuned_scatter_ompi(void *sbuf, int scount,
     int communicator_size, rank;
     size_t dsize, block_size;
 
-    OPAL_OUTPUT((smpi_coll_tuned_stream,
-                 "smpi_coll_tuned_scatter_ompi"));
+    XBT_DEBUG("smpi_coll_tuned_scatter_ompi");
 
     communicator_size = smpi_comm_size(comm);
-    rank = ompi_comm_rank(comm);
+    rank = smpi_comm_rank(comm);
 
     // Determine block size
     if (root == rank) {
-        ompi_datatype_type_size(sdtype, &dsize);
+        dsize=smpi_datatype_size(sdtype);
         block_size = dsize * scount;
     } else {
-        ompi_datatype_type_size(rdtype, &dsize);
+        dsize=smpi_datatype_size(rdtype);
         block_size = dsize * rcount;
     }
 
     if ((communicator_size > small_comm_size) &&
         (block_size < small_block_size)) {
-        return smpi_coll_tuned_scatter_intra_binomial (sbuf, scount, sdtype,
-                                                       rbuf, rcount, rdtype,
-                                                       root, comm, module);
+        if(rank!=root){
+            sbuf=xbt_malloc(rcount*smpi_datatype_get_extent(rdtype));
+            scount=rcount;
+            sdtype=rdtype;
+        }
+        int ret=smpi_coll_tuned_scatter_ompi_binomial (sbuf, scount, sdtype,
+                                                       rbuf, rcount, rdtype,
+                                                       root, comm);
+        if(rank!=root){
+            xbt_free(sbuf);
+        }
+        return ret;
     }
-    return smpi_coll_tuned_scatter_intra_basic_linear (sbuf, scount, sdtype,
+    return smpi_coll_tuned_scatter_ompi_basic_linear (sbuf, scount, sdtype,
                                                        rbuf, rcount, rdtype,
-                                                       root, comm, module);
-}*/
+                                                       root, comm);
+}