diff --git a/src/smpi/colls/bcast/bcast-SMP-linear.cpp b/src/smpi/colls/bcast/bcast-SMP-linear.cpp
index 356c53ac2a..ea576febf3 100644
--- a/src/smpi/colls/bcast/bcast-SMP-linear.cpp
+++ b/src/smpi/colls/bcast/bcast-SMP-linear.cpp
@@ -1,22 +1,21 @@
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "../colls_private.h"
+#include "../colls_private.hpp"
 
 int bcast_SMP_linear_segment_byte = 8192;
-
-int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
-                                     MPI_Datatype datatype, int root,
-                                     MPI_Comm comm)
+namespace simgrid{
+namespace smpi{
+int bcast__SMP_linear(void *buf, int count,
+                      MPI_Datatype datatype, int root,
+                      MPI_Comm comm)
 {
   int tag = COLL_TAG_BCAST;
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *request_array;
-  MPI_Status *status_array;
   int rank, size;
   int i;
   MPI_Aint extent;
@@ -32,12 +31,11 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
     num_core = comm->get_intra_comm()->size();
   }else{
     //implementation buggy in this case
-    return smpi_coll_tuned_bcast_mpich( buf , count, datatype,
-                                        root, comm);
+    return bcast__mpich(buf, count, datatype, root, comm);
   }
 
   int segment = bcast_SMP_linear_segment_byte / extent;
-  segment =  segment == 0 ? 1 :segment;
+  segment = segment == 0 ? 1 : segment;
   int pipe_length = count / segment;
   int remainder = count % segment;
   int increment = segment * extent;
@@ -52,9 +50,9 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
 
   // call native when MPI communication size is too small
   if (size <= num_core) {
-    XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
-    smpi_mpi_bcast(buf, count, datatype, root, comm);
-    return MPI_SUCCESS;
+    XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
+    bcast__default(buf, count, datatype, root, comm);
+    return MPI_SUCCESS;
   }
   // if root is not zero send to rank zero first
   if (root != 0) {
@@ -63,7 +61,7 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
     else if (rank == 0)
       Request::recv(buf, count, datatype, root, tag, comm, &status);
   }
-  // when a message is smaller than a block size => no pipeline
+  // when a message is smaller than a block size => no pipeline
   if (count <= segment) {
     // case ROOT
     if (rank == 0) {
@@ -98,10 +96,8 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
   }
   // pipeline bcast
   else {
-    request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+    MPI_Request* request_array = new MPI_Request[size + pipe_length];
+    MPI_Status* status_array = new MPI_Status[size + pipe_length];
 
     // case ROOT of each SMP
     if (rank % num_core == 0) {
@@ -163,16 +159,19 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count,
         }
       }
     }
-    free(request_array);
-    free(status_array);
+    delete[] request_array;
+    delete[] status_array;
   }
 
   // when count is not divisible by block size, use default BCAST for the remainder
   if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
-    smpi_mpi_bcast((char *) buf + (pipe_length * increment), remainder, datatype,
+    XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
+    Colls::bcast((char *) buf + (pipe_length * increment), remainder, datatype,
                  root, comm);
   }
 
   return MPI_SUCCESS;
 }
+
+}
+}
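
The recurring mechanical change in this patch is the move from C-style xbt_malloc/free to C++ new[]/delete[] for the request and status arrays. A minimal, self-contained sketch of that pattern follows; MyRequest and MyStatus are hypothetical stand-ins for MPI_Request and MPI_Status, since the real code only compiles inside the SimGrid tree:

    #include <cstddef>

    // Hypothetical stand-ins for MPI_Request / MPI_Status.
    struct MyRequest { int handle; };
    struct MyStatus  { int source; int tag; };

    int main()
    {
      const std::size_t size = 4;         // communicator size (illustrative value)
      const std::size_t pipe_length = 8;  // number of pipeline segments (illustrative value)

      // Before the patch (C style):
      //   request_array = (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
      // After the patch (C++ array new, released with delete[]):
      MyRequest* request_array = new MyRequest[size + pipe_length];
      MyStatus*  status_array  = new MyStatus[size + pipe_length];

      // ... post the pipelined sends/receives and wait on them here ...

      delete[] request_array;  // was free(request_array)
      delete[] status_array;   // was free(status_array)
      return 0;
    }

One array slot per outstanding non-blocking operation (up to size + pipe_length of them) is preserved unchanged; only the allocation mechanism differs.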