X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/b8df87e176f27b25534f27d7e240defa32ca35bc..8dd3206354d72d10fed1f3df1cf834573d9c0d9d:/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp

diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
index e764c4b2b2..55c45c54eb 100644
--- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
+++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2019. The SimGrid Team.
+/* Copyright (c) 2013-2021. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -18,17 +18,12 @@ int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
-                                                 MPI_Datatype datatype,
-                                                 int root, MPI_Comm comm)
+int bcast__arrival_pattern_aware_wait(void *buf, int count,
+                                      MPI_Datatype datatype,
+                                      int root, MPI_Comm comm)
 {
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;
-
   MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
@@ -98,14 +93,10 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
 
   /* start pipeline bcast */
-  send_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  recv_request_array =
-      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-  send_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-  recv_status_array =
-      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+  auto* send_request_array = new MPI_Request[size + pipe_length];
+  auto* recv_request_array = new MPI_Request[size + pipe_length];
+  auto* send_status_array = new MPI_Status[size + pipe_length];
+  auto* recv_status_array = new MPI_Status[size + pipe_length];
 
   /* root */
   if (rank == 0) {
@@ -239,16 +230,15 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
     }
   }
 
-  free(send_request_array);
-  free(recv_request_array);
-  free(send_status_array);
-  free(recv_status_array);
+  delete[] send_request_array;
+  delete[] recv_request_array;
+  delete[] send_status_array;
+  delete[] recv_status_array;
   /* end pipeline */
 
-  /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");
-    Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
+    XBT_INFO("MPI_bcast_arrival_pattern_aware_wait: count is not divisible by block size, use default MPI_bcast for remainder.");
+    colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
   }
 
   return MPI_SUCCESS;
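
Note on the allocation change: the commit replaces the C-style xbt_malloc()/free() pairs for the request and status arrays with C++ array new/delete[], alongside renaming the Coll_bcast_arrival_pattern_aware_wait::bcast method to the free function bcast__arrival_pattern_aware_wait. Below is a minimal, self-contained sketch of the before/after allocation pattern; the Request/Status structs are hypothetical stand-ins for MPI_Request and MPI_Status, whose real definitions live in SimGrid's SMPI headers.

#include <cstdlib>

/* Hypothetical stubs so the sketch compiles on its own; the real types
 * come from SimGrid's SMPI headers. */
struct Request {};
struct Status {};

int main()
{
  const int size = 4, pipe_length = 16; /* arbitrary illustration values */

  /* Before the commit: untyped C allocation, released with free(). */
  auto* req_c = static_cast<Request*>(std::malloc((size + pipe_length) * sizeof(Request)));
  std::free(req_c);

  /* After the commit: typed array new, released with the matching delete[]. */
  auto* req_cpp = new Request[size + pipe_length];
  delete[] req_cpp;

  return 0;
}

The two styles must not be mixed: memory obtained with new[] has to be released with delete[], never free(), which is why the commit changes the allocation and deallocation sites together.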