X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/e7c0c67af63b3979a597a66e5e1c8b0435fc6e19..21446da234b79358c6f7ed918e582e7e5a36d8b3:/src/smpi/colls/allreduce-smp-binomial-pipeline.c

diff --git a/src/smpi/colls/allreduce-smp-binomial-pipeline.c b/src/smpi/colls/allreduce-smp-binomial-pipeline.c
index 05ecfd6cf8..06a80a7a0c 100644
--- a/src/smpi/colls/allreduce-smp-binomial-pipeline.c
+++ b/src/smpi/colls/allreduce-smp-binomial-pipeline.c
@@ -1,3 +1,9 @@
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
 #include "colls_private.h"
 /* IMPLEMENTED BY PITCH PATARASUK
    Non-topology-specific (however, the number of cores per node needs to be changed)
@@ -6,11 +12,7 @@
    inter-communication
    The communications are done in a pipeline fashion */
 
-/* change the number of cores per smp-node
-   we assume that the number of cores per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
+
 
 /* this is a default segment size for pipelining,
    but it is typically passed as a command line argument */
@@ -29,18 +31,6 @@ int allreduce_smp_binomial_pipeline_segment_size = 4096;
    This code assumes a commutative and associative reduce operator (MPI_SUM, MPI_MAX, etc.).
 */
-#ifndef MPICH2
-extern void *MPIR_ToPointer();
-
-struct MPIR_OP {
-  MPI_User_function *op;
-  int commute, permanent;
-};
-
-#else
-extern MPI_User_function *MPIR_Op_table[];
-#endif
-
 /* This function performs the all-reduce operation as follows,
    ** in a pipeline fashion **
    1) binomial_tree reduce inside each SMP node
@@ -55,24 +45,22 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
 {
   int comm_size, rank;
   void *tmp_buf;
-  int tag = 50;
+  int tag = COLL_TAG_ALLREDUCE;
   int mask, src, dst;
   MPI_Status status;
-  int num_core = NUM_CORE;
-
-  MPI_User_function *uop;
-#ifndef MPICH2
-  struct MPIR_OP *op_ptr = MPIR_ToPointer(op);
-  uop = (MPI_User_function *) op_ptr->op;
-#else
-  uop = MPIR_Op_table[op % 16 - 1];
-#endif
+  if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+    smpi_comm_init_smp(comm);
+  }
+  int num_core=1;
+  if (smpi_comm_is_uniform(comm)){
+    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+  }
   comm_size = smpi_comm_size(comm);
   rank = smpi_comm_rank(comm);
   MPI_Aint extent;
   extent = smpi_datatype_get_extent(dtype);
-  tmp_buf = (void *) xbt_malloc(count * extent);
+  tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
 
   int intra_rank, inter_rank;
   intra_rank = rank % num_core;
@@ -111,7 +99,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
     if (src < comm_size) {
       recv_offset = phase * pcount * extent;
       smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
-      (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+      smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
     }
   } else {
     send_offset = phase * pcount * extent;
@@ -135,7 +123,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       if (src < comm_size) {
         recv_offset = (phase - 1) * pcount * extent;
         smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
-        (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+        smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
       }
     } else {
       dst = (inter_rank & (~mask)) * num_core;
@@ -209,6 +197,6 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       }
     }
   }                             // for phase
-  free(tmp_buf);
+  smpi_free_tmp_buffer(tmp_buf);
   return MPI_SUCCESS;
 }
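For context, the patched routine implements a four-step pipelined allreduce: 1) binomial-tree reduce inside each SMP node, 2) binomial-tree reduce between node leaders, 3) binomial-tree bcast between node leaders, 4) binomial-tree bcast inside each node, with the message split into segments so the steps can overlap across phases. Below is a minimal sketch, in plain MPI rather than SMPI internals, of step 1's segment loop. It is not the SimGrid code: the helper name intra_binomial_reduce_pipelined is hypothetical, MPI_Reduce_local merely stands in for smpi_op_apply, and it assumes MPI_SUM on doubles, count divisible by seg_count, and fully populated SMP nodes.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch, not SimGrid code: reduce `count` doubles onto the
   node-local root (intra_rank 0), one `seg_count`-element segment per
   pipeline phase. */
static void intra_binomial_reduce_pipelined(double *buf, int count,
                                            int seg_count, int num_core,
                                            MPI_Comm comm)
{
  int rank;
  const int tag = 50;
  MPI_Comm_rank(comm, &rank);
  int intra_rank = rank % num_core;      /* position inside the SMP node */
  int base = rank - intra_rank;          /* first global rank on this node */
  int pipe_length = count / seg_count;   /* number of pipeline phases */
  double *tmp = malloc(seg_count * sizeof(double));

  for (int phase = 0; phase < pipe_length; phase++) {
    double *seg = buf + (size_t)phase * seg_count;
    for (int mask = 1; mask < num_core; mask <<= 1) {
      if ((intra_rank & mask) == 0) {
        int src = intra_rank | mask;     /* child in the binomial tree */
        if (src < num_core) {
          MPI_Recv(tmp, seg_count, MPI_DOUBLE, base + src, tag, comm,
                   MPI_STATUS_IGNORE);
          /* fold the child's segment into ours; this is the role played
             by smpi_op_apply(op, ...) in the patched SMPI code */
          MPI_Reduce_local(tmp, seg, seg_count, MPI_DOUBLE, MPI_SUM);
        }
      } else {
        int dst = intra_rank & ~mask;    /* parent in the binomial tree */
        MPI_Send(seg, seg_count, MPI_DOUBLE, base + dst, tag, comm);
        break;                           /* a sender is done for this phase */
      }
    }
  }
  free(tmp);
}

The real routine gains its speedup by overlapping the four steps: while segment k is being reduced between node leaders, segment k+1 can still be in the intra-node reduce, which is why the phase offsets in the patched code differ by one between stages (e.g. recv_offset = (phase - 1) * pcount * extent in the inter-node stage above).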