X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/d155fd69fa99c97b3a9c86bb7f2e472c2e7332df..793611840734611c59b5a25b70ef779a89c08c6a:/src/smpi/colls/allreduce-smp-rsag.c

diff --git a/src/smpi/colls/allreduce-smp-rsag.c b/src/smpi/colls/allreduce-smp-rsag.c
index a40f18979d..8d751c88da 100644
--- a/src/smpi/colls/allreduce-smp-rsag.c
+++ b/src/smpi/colls/allreduce-smp-rsag.c
@@ -1,4 +1,10 @@
-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved.                                                     */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
 /* change number of core per smp-node
    we assume that number of core per process will be the same for all implementations */
@@ -19,10 +25,13 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
 {
   int comm_size, rank;
   void *tmp_buf;
-  int tag = 50;
+  int tag = COLL_TAG_ALLREDUCE;
   int mask, src, dst;
   MPI_Status status;
-  int num_core = NUM_CORE;
+  int num_core = simcall_host_get_core(SIMIX_host_self());
+  // do we use the default one or the number of cores in the platform ?
+  // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+  if (num_core == 1) num_core = NUM_CORE;
 /*
 #ifdef MPICH2_REDUCTION
   MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
@@ -33,11 +42,11 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
   uop = op_ptr->op;
 #endif
 */
-  MPI_Comm_size(comm, &comm_size);
-  MPI_Comm_rank(comm, &rank);
+  comm_size = smpi_comm_size(comm);
+  rank = smpi_comm_rank(comm);
   MPI_Aint extent;
-  MPI_Type_extent(dtype, &extent);
-  tmp_buf = (void *) malloc(count * extent);
+  extent = smpi_datatype_get_extent(dtype);
+  tmp_buf = (void *) xbt_malloc(count * extent);
 
   int intra_rank, inter_rank;
   intra_rank = rank % num_core;
@@ -53,7 +62,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
   }
 
 
-  MPI_Sendrecv(send_buf, count, dtype, rank, tag,
+  smpi_mpi_sendrecv(send_buf, count, dtype, rank, tag,
                recv_buf, count, dtype, rank, tag, comm, &status);
 
@@ -64,14 +73,14 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
       src = (inter_rank * num_core) + (intra_rank | mask);
       //      if (src < ((inter_rank + 1) * num_core)) {
       if (src < comm_size) {
-        MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
-        star_reduction(op, tmp_buf, recv_buf, &count, &dtype);
+        smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
+        smpi_op_apply(op, tmp_buf, recv_buf, &count, &dtype);
         //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
       }
     } else {
 
       dst = (inter_rank * num_core) + (intra_rank & (~mask));
-      MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+      smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
       //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
       break;
     }
@@ -100,12 +109,12 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
           ((inter_rank - 2 - i +
             inter_comm_size) % inter_comm_size) * seg_count * extent;
 
-      MPI_Sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
+      smpi_mpi_sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
                    tag + i, tmp_buf, seg_count, dtype, from, tag + i, comm,
                    &status);
 
       // result is in rbuf
-      star_reduction(op, tmp_buf, (char *) recv_buf + recv_offset, &seg_count,
+      smpi_op_apply(op, tmp_buf, (char *) recv_buf + recv_offset, &seg_count,
                      &dtype);
     }
@@ -119,7 +128,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
           ((inter_rank - 1 - i +
             inter_comm_size) % inter_comm_size) * seg_count * extent;
 
-      MPI_Sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
+      smpi_mpi_sendrecv((char *) recv_buf + send_offset, seg_count, dtype, to,
                    tag + i, (char *) recv_buf + recv_offset, seg_count, dtype,
                    from, tag + i, comm, &status);
@@ -139,14 +148,14 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
     if ((mask & inter_rank) == 0) {
       src = (inter_rank | mask) * num_core;
       if (src < comm_size) {
-        MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
+        smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
         (* uop) (tmp_buf, recv_buf, &count, &dtype);
         //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
       }
     } else {
 
       dst = (inter_rank & (~mask)) * num_core;
-      MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+      smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
       //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
       break;
     }
@@ -165,7 +174,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
     if (inter_rank & mask) {
       src = (inter_rank - mask) * num_core;
       //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
-      MPI_Recv(recv_buf, count, dtype, src, tag, comm, &status);
+      smpi_mpi_recv(recv_buf, count, dtype, src, tag, comm, &status);
       break;
     }
     mask <<= 1;
@@ -179,7 +188,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
       dst = (inter_rank + mask) * num_core;
       if (dst < comm_size) {
         //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
-        MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+        smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
       }
     }
     mask >>= 1;
@@ -200,7 +209,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
     if (intra_rank & mask) {
       src = (inter_rank * num_core) + (intra_rank - mask);
       //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
-      MPI_Recv(recv_buf, count, dtype, src, tag, comm, &status);
+      smpi_mpi_recv(recv_buf, count, dtype, src, tag, comm, &status);
       break;
     }
     mask <<= 1;
@@ -213,7 +222,7 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf,
       dst = (inter_rank * num_core) + (intra_rank + mask);
       if (dst < comm_size) {
         //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
-        MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+        smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
       }
     mask >>= 1;
   }
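The mask-based pairing that recurs throughout this patch (intra-node reduce, inter-node combine, and the two broadcast phases) is a standard binomial-tree exchange. The sketch below is an illustrative, self-contained restatement in plain MPI, not SimGrid's internal smpi_* API; the names binomial_reduce, local_buf, tmp and combine are hypothetical. It follows the same logic as the first loop of the patched function: a rank whose current mask bit is clear receives from the partner obtained with rank | mask, while a rank whose bit is set sends its data to rank & (~mask) and drops out, so the result accumulates on rank 0.

/* Illustrative sketch only: binomial-tree reduce over `size` ranks using the
 * same bit-mask pairing as the patched intra-node phase. Names are
 * hypothetical; the real code applies the MPI_Op via smpi_op_apply. */
#include <mpi.h>
#include <stdlib.h>

static void combine(const double *in, double *inout, int count)
{
  for (int i = 0; i < count; i++)     /* stand-in for the reduction operator */
    inout[i] += in[i];
}

void binomial_reduce(double *local_buf, int count, int rank, int size,
                     int tag, MPI_Comm comm)
{
  double *tmp = malloc(count * sizeof(double));
  MPI_Status status;
  int mask = 1;
  while (mask < size) {
    if ((mask & rank) == 0) {
      int src = rank | mask;            /* partner with the mask bit set     */
      if (src < size) {
        MPI_Recv(tmp, count, MPI_DOUBLE, src, tag, comm, &status);
        combine(tmp, local_buf, count); /* fold the partner's data into ours */
      }
    } else {
      int dst = rank & (~mask);         /* partner with the mask bit clear   */
      MPI_Send(local_buf, count, MPI_DOUBLE, dst, tag, comm);
      break;                            /* data handed off; this rank is done */
    }
    mask <<= 1;
  }
  free(tmp);                            /* rank 0 now holds the full result  */
}

In the patched function the same pattern is simply offset by inter_rank * num_core so that the pairing stays within one SMP node, and the final broadcast phases run it in reverse (receivers become senders as the mask shrinks).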