Update copyright lines for 2022.
[simgrid.git] / src / smpi / colls / allreduce / allreduce-mvapich-rs.cpp
index 1711f3b..7d33c76 100644
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2022. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  */
 
 #include "../colls_private.hpp"
-namespace simgrid{
-namespace smpi{
-int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
-                            void *recvbuf,
-                            int count,
-                            MPI_Datatype datatype,
-                            MPI_Op op, MPI_Comm comm)
+#include <algorithm>
+
+namespace simgrid {
+namespace smpi {
+int allreduce__mvapich2_rs(const void *sendbuf,
+                           void *recvbuf,
+                           int count,
+                           MPI_Datatype datatype,
+                           MPI_Op op, MPI_Comm comm)
 {
     int mpi_errno = MPI_SUCCESS;
     int newrank = 0;
     int mask, pof2, i, send_idx, recv_idx, last_idx, send_cnt;
-    int dst, is_commutative, rem, newdst,
-        recv_cnt, *cnts, *disps;
+    int dst, rem, newdst, recv_cnt;
     MPI_Aint true_lb, true_extent, extent;
-    void *tmp_buf, *tmp_buf_free;
 
     if (count == 0) {
         return MPI_SUCCESS;
@@ -47,16 +47,16 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
     int comm_size =  comm->size();
     int rank = comm->rank();
 
-    is_commutative = (op==MPI_OP_NULL || op->is_commutative());
+    bool is_commutative = (op == MPI_OP_NULL || op->is_commutative());
 
     /* need to allocate temporary buffer to store incoming data */
     datatype->extent(&true_lb, &true_extent);
     extent = datatype->get_extent();
 
-    tmp_buf_free= smpi_get_tmp_recvbuffer(count * (MAX(extent, true_extent)));
+    unsigned char* tmp_buf_free = smpi_get_tmp_recvbuffer(count * std::max(extent, true_extent));
 
     /* adjust for potential negative lower bound in datatype */
-    tmp_buf = (void *) ((char *) tmp_buf_free - true_lb);
+    unsigned char* tmp_buf = tmp_buf_free - true_lb;
 
     /* copy local data into recvbuf */
     if (sendbuf != MPI_IN_PLACE) {
@@ -84,7 +84,7 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
                                      COLL_TAG_ALLREDUCE, comm);
 
             /* temporarily set the rank to -1 so that this
-               process does not pariticipate in recursive
+               process does not participate in recursive
                doubling */
             newrank = -1;
         } else {
@@ -150,8 +150,8 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
             /* for the reduce-scatter, calculate the count that
                each process receives and the displacement within
                the buffer */
-            cnts = (int *)xbt_malloc(pof2 * sizeof (int));
-            disps = (int *)xbt_malloc(pof2 * sizeof (int));
+            int* cnts  = new int[pof2];
+            int* disps = new int[pof2];
 
             for (i = 0; i < (pof2 - 1); i++) {
                 cnts[i] = count / pof2;
@@ -187,15 +187,9 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
                 }
 
                 /* Send data from recvbuf. Recv into tmp_buf */
-                Request::sendrecv((char *) recvbuf +
-                                             disps[send_idx] * extent,
-                                             send_cnt, datatype,
-                                             dst, COLL_TAG_ALLREDUCE,
-                                             (char *) tmp_buf +
-                                             disps[recv_idx] * extent,
-                                             recv_cnt, datatype, dst,
-                                             COLL_TAG_ALLREDUCE, comm,
-                                             MPI_STATUS_IGNORE);
+                Request::sendrecv(static_cast<char*>(recvbuf) + disps[send_idx] * extent, send_cnt, datatype, dst,
+                                  COLL_TAG_ALLREDUCE, tmp_buf + disps[recv_idx] * extent, recv_cnt, datatype, dst,
+                                  COLL_TAG_ALLREDUCE, comm, MPI_STATUS_IGNORE);
 
                 /* tmp_buf contains data received in this step.
                    recvbuf contains data accumulated so far */
@@ -203,9 +197,9 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
                 /* This algorithm is used only for predefined ops
                    and predefined ops are always commutative. */
 
-                if(op!=MPI_OP_NULL) op->apply( (char *) tmp_buf + disps[recv_idx] * extent,
-                        (char *) recvbuf + disps[recv_idx] * extent,
-                        &recv_cnt, datatype);
+                if (op != MPI_OP_NULL)
+                  op->apply(tmp_buf + disps[recv_idx] * extent, static_cast<char*>(recvbuf) + disps[recv_idx] * extent,
+                            &recv_cnt, datatype);
 
                 /* update send_idx for next iteration */
                 send_idx = recv_idx;
@@ -265,8 +259,8 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
 
                 mask >>= 1;
             }
-            xbt_free(disps);
-            xbt_free(cnts);
+            delete[] disps;
+            delete[] cnts;
         }
     }
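
The hunks above swap the xbt_malloc'ed cnts/disps arrays for new[]/delete[], but the diff only shows part of how that layout is built for the recursive-halving reduce-scatter. The standalone sketch below illustrates the usual construction: every rank in the power-of-two group gets count / pof2 elements, the last rank absorbs the remainder, and displacements are the running prefix sums. It is illustrative only; RsLayout and make_rs_layout are hypothetical names, not SimGrid or MVAPICH2 API, and the displacement loop is assumed to follow the standard MVAPICH2 pattern since the diff context cuts it off.

```cpp
// Illustrative sketch of the count/displacement layout used by the
// reduce-scatter phase of a recursive-halving allreduce.
// Hypothetical helper names; not part of SimGrid.
#include <cstdio>
#include <vector>

struct RsLayout {
  std::vector<int> cnts;   // number of elements owned by each of the pof2 ranks
  std::vector<int> disps;  // starting offset (in elements) of each rank's block
};

static RsLayout make_rs_layout(int count, int pof2) {
  RsLayout l{std::vector<int>(pof2), std::vector<int>(pof2)};
  for (int i = 0; i < pof2 - 1; i++)
    l.cnts[i] = count / pof2;                              // even share for all but the last rank
  l.cnts[pof2 - 1] = count - (count / pof2) * (pof2 - 1);  // last rank takes the remainder
  l.disps[0] = 0;
  for (int i = 1; i < pof2; i++)
    l.disps[i] = l.disps[i - 1] + l.cnts[i - 1];           // blocks laid out contiguously
  return l;
}

int main() {
  // 10 elements over 4 ranks -> cnts {2, 2, 2, 4}, disps {0, 2, 4, 6}
  RsLayout l = make_rs_layout(10, 4);
  for (int i = 0; i < 4; i++)
    std::printf("rank %d: cnt=%d disp=%d\n", i, l.cnts[i], l.disps[i]);
  return 0;
}
```

In the committed code these per-rank blocks are what each sendrecv exchanges (send_cnt/recv_cnt are sums of consecutive cnts[] entries, send_idx/recv_idx index into disps[]), which is why the arrays must live until the final allgather-style phase frees them with delete[].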