X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/d155fd69fa99c97b3a9c86bb7f2e472c2e7332df..1687df79d61a9418bba830bbd0ab7de16e457090:/src/smpi/colls/allreduce-smp-binomial-pipeline.c

diff --git a/src/smpi/colls/allreduce-smp-binomial-pipeline.c b/src/smpi/colls/allreduce-smp-binomial-pipeline.c
index b5efc1eb29..06a80a7a0c 100644
--- a/src/smpi/colls/allreduce-smp-binomial-pipeline.c
+++ b/src/smpi/colls/allreduce-smp-binomial-pipeline.c
@@ -1,4 +1,10 @@
-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
 /* IMPLEMENTED BY PITCH PATARASUK
    Non-topoloty-specific (however, number of cores/node need to be changed)
    all-reduce operation designed for smp clusters
@@ -6,11 +12,7 @@
    inter-communication
    The communication are done in a pipeline fashion */
-/* change number of core per smp-node
-   we assume that number of core per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
+
 
 /* this is a default segment size for pipelining,
    but it is typically passed as a command line argument */
 int allreduce_smp_binomial_pipeline_segment_size = 4096;
@@ -29,18 +31,6 @@
    This code assume commutative and associative reduce operator (MPI_SUM, MPI_MAX, etc).
 */
-#ifndef MPICH2
-extern void *MPIR_ToPointer();
-
-struct MPIR_OP {
-  MPI_User_function *op;
-  int commute, permanent;
-};
-
-#else
-extern MPI_User_function *MPIR_Op_table[];
-#endif
-
 
 /* This fucntion performs all-reduce operation as follow.
 ** in a pipeline fashion
 ** 1) binomial_tree reduce inside each SMP node
@@ -55,24 +45,22 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
 {
   int comm_size, rank;
   void *tmp_buf;
-  int tag = 50;
+  int tag = COLL_TAG_ALLREDUCE;
   int mask, src, dst;
   MPI_Status status;
-  int num_core = NUM_CORE;
-
-  MPI_User_function *uop;
-#ifndef MPICH2
-  struct MPIR_OP *op_ptr = MPIR_ToPointer(op);
-  uop = (MPI_User_function *) op_ptr->op;
-#else
-  uop = MPIR_Op_table[op % 16 - 1];
-#endif
-
-  MPI_Comm_size(comm, &comm_size);
-  MPI_Comm_rank(comm, &rank);
+  if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+    smpi_comm_init_smp(comm);
+  }
+  int num_core=1;
+  if (smpi_comm_is_uniform(comm)){
+    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+  }
+
+  comm_size = smpi_comm_size(comm);
+  rank = smpi_comm_rank(comm);
   MPI_Aint extent;
-  MPI_Type_extent(dtype, &extent);
-  tmp_buf = (void *) malloc(count * extent);
+  extent = smpi_datatype_get_extent(dtype);
+  tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
 
   int intra_rank, inter_rank;
   intra_rank = rank % num_core;
@@ -91,7 +79,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
   int inter_comm_size = (comm_size + num_core - 1) / num_core;
 
   /* copy input buffer to output buffer */
-  MPI_Sendrecv(send_buf, count, dtype, rank, tag,
+  smpi_mpi_sendrecv(send_buf, count, dtype, rank, tag,
                recv_buf, count, dtype, rank, tag, comm, &status);
 
   /* compute pipe length */
@@ -110,13 +98,13 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
         src = (inter_rank * num_core) + (intra_rank | mask);
         if (src < comm_size) {
           recv_offset = phase * pcount * extent;
-          MPI_Recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
-          (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+          smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
+          smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
         }
       } else {
         send_offset = phase * pcount * extent;
         dst = (inter_rank * num_core) + (intra_rank & (~mask));
-        MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+        smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
         break;
       }
       mask <<= 1;
@@ -134,13 +122,13 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
         src = (inter_rank | mask) * num_core;
         if (src < comm_size) {
           recv_offset = (phase - 1) * pcount * extent;
-          MPI_Recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
-          (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+          smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
+          smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
         }
       } else {
         dst = (inter_rank & (~mask)) * num_core;
         send_offset = (phase - 1) * pcount * extent;
-        MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+        smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
         break;
       }
       mask <<= 1;
@@ -157,7 +145,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       if (inter_rank & mask) {
         src = (inter_rank - mask) * num_core;
         recv_offset = (phase - 2) * pcount * extent;
-        MPI_Recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
+        smpi_mpi_recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
                  &status);
         break;
       }
@@ -171,7 +159,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       if (dst < comm_size) {
         //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
         send_offset = (phase - 2) * pcount * extent;
-        MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+        smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
       }
     }
     mask >>= 1;
@@ -190,7 +178,7 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       if (intra_rank & mask) {
         src = (inter_rank * num_core) + (intra_rank - mask);
         recv_offset = (phase - 3) * pcount * extent;
-        MPI_Recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
+        smpi_mpi_recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
                  &status);
         break;
       }
@@ -202,13 +190,13 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
       dst = (inter_rank * num_core) + (intra_rank + mask);
       if (dst < comm_size) {
         send_offset = (phase - 3) * pcount * extent;
-        MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+        smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
       }
       mask >>= 1;
     }
    }
  }                             // for phase
 
-  free(tmp_buf);
+  smpi_free_tmp_buffer(tmp_buf);
   return MPI_SUCCESS;
 }
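The patched function overlaps four stages per segment: (1) binomial reduce inside each SMP node, (2) binomial reduce across node leaders, (3) binomial broadcast across node leaders, and (4) binomial broadcast inside each SMP node. The segment offsets visible in the hunks (phase, phase - 1, phase - 2, phase - 3, each multiplied by pcount * extent) imply that the surrounding loop, elided from this diff, runs for pipe_length + 3 phases so the four-stage pipeline can fill and drain. The standalone sketch below reproduces that schedule and prints which segment each stage touches at every phase. It is not part of the patch: the count and segment size are illustrative values, and the stage guards are inferred from the offsets shown in the hunks rather than quoted from the source.

#include <stdio.h>

/* Sketch of the pipeline schedule behind
   smpi_coll_tuned_allreduce_smp_binomial_pipeline.
   Names mirror the patch (pcount, pipe_length); values are examples. */
int main(void)
{
  int count  = 16384;   /* total element count (illustrative)                */
  int pcount = 4096;    /* segment size, cf. allreduce_smp_binomial_pipeline_segment_size */
  int pipe_length = count / pcount;   /* as in the "compute pipe length" step */
  int phase;

  /* Four stages => pipe_length + 3 phases until the pipeline drains. */
  for (phase = 0; phase < pipe_length + 3; phase++) {
    printf("phase %2d:", phase);
    if (phase < pipe_length)                     /* stage 1: intra-node reduce  */
      printf("  intra-reduce seg %d", phase);
    if (phase > 0 && phase < pipe_length + 1)    /* stage 2: inter-node reduce  */
      printf("  inter-reduce seg %d", phase - 1);
    if (phase > 1 && phase < pipe_length + 2)    /* stage 3: inter-node bcast   */
      printf("  inter-bcast seg %d", phase - 2);
    if (phase > 2 && phase < pipe_length + 3)    /* stage 4: intra-node bcast   */
      printf("  intra-bcast seg %d", phase - 3);
    printf("\n");
  }
  return 0;
}

With these example values pipe_length is 4, so seven phases are printed, and at phase 3 all four stages are active on four different segments at once, which is the overlap the pipelining is designed to achieve.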