X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/b7ed19dfcc221d7b3eca182abb5c4a3946671172..cdf6a962eb4e88efbed3df9c41343adabcf09e6c:/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp

diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
index 5baa7cf3e9..46e6125f26 100644
--- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
+++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "../colls_private.h"
+#include "../colls_private.hpp"
 
 int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 
@@ -15,7 +15,8 @@ int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 #ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
 #define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
 #endif
-
+namespace simgrid{
+namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
 int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
                                                  MPI_Datatype datatype,
@@ -23,11 +24,6 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
 {
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;
-
   MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
 
 
@@ -61,14 +57,14 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
 
   /* segment is segment size in number of elements (not bytes) */
   int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;
-  segment = segment == 0 ? 1 :segment;
+  segment = segment == 0 ? 1 :segment;
 
   /* pipeline length */
   int pipe_length = count / segment;
 
   /* use for buffer offset for sending and receiving data = segment size in byte */
   int increment = segment * extent;
 
-  /* if the input size is not divisible by segment size => 
+  /* if the input size is not divisible by segment size =>
      the small remainder will be done with native implementation */
   int remainder = count % segment;
@@ -97,14 +93,10 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
 
   /* start pipeline bcast */
 
-  send_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  recv_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  send_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-  recv_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+  MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
+  MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
+  MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
+  MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
 
   /* root */
   if (rank == 0) {
@@ -238,17 +230,20 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
     }
   }
 
-  free(send_request_array);
-  free(recv_request_array);
-  free(send_status_array);
-  free(recv_status_array);
+  delete[] send_request_array;
+  delete[] recv_request_array;
+  delete[] send_status_array;
+  delete[] recv_status_array;
   /* end pipeline */
 
   /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");
+    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");
     Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
   }
 
   return MPI_SUCCESS;
 }
+
+}
+}
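
Note for readers skimming the patch: the changes above are mechanical C++-migration work (the colls_private.h include becomes colls_private.hpp, the function is wrapped in the simgrid::smpi namespace, and the xbt_malloc/free pairs for the request and status arrays become new[]/delete[] in hunks @@ -97,14 +93,10 @@ and @@ -238,17 +230,20 @@); the broadcast algorithm itself is untouched. The arithmetic in hunk @@ -61,14 +57,14 @@ is the heart of the pipelined broadcast. The standalone sketch below is not part of the patch; it merely reproduces that arithmetic with made-up count and extent values to show how a message is carved into pipeline segments.

    // Minimal sketch of the segmentation arithmetic used by the pipelined
    // bcast above. The count/extent values are hypothetical illustrations.
    #include <cstdio>

    int main() {
      const int segment_size_in_byte = 8192; // default from the patched file
      int count  = 100000; // message length in elements (hypothetical)
      int extent = 8;      // datatype width in bytes, e.g. a double (hypothetical)

      int segment = segment_size_in_byte / extent; // segment size in elements, not bytes
      segment = segment == 0 ? 1 : segment;        // never below one element per segment

      int pipe_length = count / segment;  // number of full pipeline stages
      int increment   = segment * extent; // per-stage buffer offset in bytes
      int remainder   = count % segment;  // leftover, delegated to the default bcast

      std::printf("segment=%d pipe_length=%d increment=%d remainder=%d\n",
                  segment, pipe_length, increment, remainder);
      // -> segment=1024 pipe_length=97 increment=8192 remainder=672
      return 0;
    }

With the default 8192-byte segments and 8-byte elements, a 100000-element message yields 97 pipelined segments plus a 672-element tail, which the patched function hands off to Colls::bcast, exactly as the remainder branch at the end of the diff shows.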