X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/2807fde4fd1f59c230d69a934634c5dfb77905f2..ea74f5d95928a521a588737e81f1de94eef25d19:/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp

diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp
index 484ba793fa..4cd0815745 100644
--- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp
+++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2022. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -14,17 +14,13 @@ static int bcast_NTSL_segment_size_in_byte = 8192;
 namespace simgrid{
 namespace smpi{
 /* Non-topology-specific pipelined linear-bcast function */
-int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
-                                            MPI_Datatype datatype, int root,
-                                            MPI_Comm comm)
+int bcast__arrival_pattern_aware(void *buf, int count,
+                                 MPI_Datatype datatype, int root,
+                                 MPI_Comm comm)
 {
   int tag = -COLL_TAG_BCAST;
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;
 
   MPI_Status temp_status_array[MAX_NODE];
 
@@ -163,14 +159,10 @@ int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
   }
 
   /* pipeline bcast */
   else {
-    send_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    recv_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    send_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-    recv_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+    auto* send_request_array = new MPI_Request[size + pipe_length];
+    auto* recv_request_array = new MPI_Request[size + pipe_length];
+    auto* send_status_array = new MPI_Status[size + pipe_length];
+    auto* recv_status_array = new MPI_Status[size + pipe_length];
 
     if (rank == 0) {
       //double start2 = MPI_Wtime();
@@ -350,16 +342,15 @@ int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
     }
 
-    free(send_request_array);
-    free(recv_request_array);
-    free(send_status_array);
-    free(recv_status_array);
+    delete[] send_request_array;
+    delete[] recv_request_array;
+    delete[] send_status_array;
+    delete[] recv_status_array;
   }                             /* end pipeline */
-
   /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    XBT_WARN("MPI_bcast_arrival_pattern_aware use default MPI_bcast.");
-    Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
+    XBT_INFO("MPI_bcast_arrival_pattern_aware: count is not divisible by block size, use default MPI_bcast for remainder.");
+    colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
   }
 
   return MPI_SUCCESS;