-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Additional copyrights may follow
*/
-#include "../colls_private.h"
-#include "../coll_tuned_topo.h"
+#include "../coll_tuned_topo.hpp"
+#include "../colls_private.hpp"
namespace simgrid{
namespace smpi{
-int Coll_gather_ompi_binomial::gather(void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+int Coll_gather_ompi_binomial::gather(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
MPI_Datatype rdtype, int root, MPI_Comm comm)
{
int line = -1;
int vrank;
int size;
int total_recv = 0;
- char *ptmp = NULL;
- char *tempbuf = NULL;
+ unsigned char* ptmp = nullptr;
+ unsigned char* tempbuf = nullptr;
+ const unsigned char* src_buf;
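/* src_buf points at whatever this rank forwards to its parent: the (const) user
 * sbuf on leaf nodes, or the mutable staging buffer on the root and on
 * intermediate nodes. */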
int err;
ompi_coll_tree_t* bmtree;
MPI_Status status;
- MPI_Aint sextent, slb, strue_lb, strue_extent;
+ MPI_Aint sextent, slb, strue_lb, strue_extent;
MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent;
rdtype->extent(&rtrue_lb, &rtrue_extent);
if (0 == root) {
/* root on 0, just use the recv buffer */
- ptmp = (char*)rbuf;
+ ptmp = static_cast<unsigned char*>(rbuf);
if (sbuf != MPI_IN_PLACE) {
err = Datatype::copy(sbuf, scount, sdtype, ptmp, rcount, rdtype);
if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
}
} else {
/* root is not on 0, allocate temp buffer for recv,
* rotate data at the end */
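/* (With root != 0 the gather runs on shifted virtual ranks, vrank = (rank - root + size) % size,
 * so blocks arrive in tempbuf in vrank order and are rotated back into true rank
 * order when they are finally copied into rbuf.) */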
- tempbuf = (char*)smpi_get_tmp_recvbuffer(rtrue_extent + (rcount * size - 1) * rextent);
+ tempbuf = smpi_get_tmp_recvbuffer(rtrue_extent + (rcount * size - 1) * rextent);
if (NULL == tempbuf) {
err = MPI_ERR_OTHER;
line = __LINE__;
goto err_hndl;
}
}
total_recv = rcount;
+ src_buf = ptmp;
} else if (!(vrank % 2)) {
/* other non-leaf nodes, allocate temp buffer for data received from
* children, the most we need is half of the total data elements due
* to the property of binomial tree */
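/* (With the root relabelled to vrank 0, the largest subtree below it holds about
 * size/2 processes, which is where the "half of the total data elements" bound
 * comes from.) */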
- tempbuf = (char*)smpi_get_tmp_sendbuffer(strue_extent + (scount * size - 1) * sextent);
+ tempbuf = smpi_get_tmp_sendbuffer(strue_extent + (scount * size - 1) * sextent);
if (NULL == tempbuf) {
err = MPI_ERR_OTHER;
line = __LINE__;
goto err_hndl;
}
rcount = scount;
rextent = sextent;
total_recv = rcount;
+ src_buf = ptmp;
} else {
/* leaf nodes, no temp buffer needed, use sdtype,scount as
* rdtype,rcount since they are ignored on non-root procs */
- ptmp = (char*)sbuf;
total_recv = scount;
+ src_buf = static_cast<const unsigned char*>(sbuf);
}
if (rank != root) {
/* all nodes except root send to parents */
XBT_DEBUG("smpi_coll_tuned_gather_ompi_binomial rank %d send %d count %d\n", rank, bmtree->tree_prev, total_recv);
- Request::send(ptmp, total_recv, sdtype, bmtree->tree_prev, COLL_TAG_GATHER, comm);
+ Request::send(src_buf, total_recv, sdtype, bmtree->tree_prev, COLL_TAG_GATHER, comm);
}
if (rank == root) {
if (root != 0) {
/* other non-leaf nodes */
smpi_free_tmp_buffer(tempbuf);
}
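/* Release the tree through the topo helper that matches the
 * ompi_coll_tuned_topo_build_* call which created it, instead of freeing the
 * raw struct. */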
- xbt_free(bmtree);
+ ompi_coll_tuned_topo_destroy_tree(&bmtree);
return MPI_SUCCESS;
err_hndl:
* Accepts: - same arguments as MPI_Gather(), first segment size
* Returns: - MPI_SUCCESS or error code
*/
-int Coll_gather_ompi_linear_sync::gather(void *sbuf, int scount,
+int Coll_gather_ompi_linear_sync::gather(const void *sbuf, int scount,
MPI_Datatype sdtype,
void *rbuf, int rcount,
MPI_Datatype rdtype,
- int root,
+ int root,
MPI_Comm comm)
{
int i;
int first_segment_size=0;
size = comm->size();
rank = comm->rank();
-
+
size_t dsize, block_size;
if (rank == root) {
  dsize = rdtype->size();
  block_size = dsize * rcount;
} else {
  dsize = sdtype->size();
  block_size = dsize * scount;
}
-
+
if (block_size > 92160){
first_segment_size = 32768;
}else{
first_segment_count = scount;
COLL_TUNED_COMPUTED_SEGCOUNT((size_t)first_segment_size, typelng, first_segment_count);
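/* COLL_TUNED_COMPUTED_SEGCOUNT turns the byte limit first_segment_size into an
 * element count: when scount elements of this type would exceed the limit,
 * first_segment_count is reduced to roughly first_segment_size / typelng. */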
- Request::recv(sbuf, 0, MPI_BYTE, root, COLL_TAG_GATHER, comm, MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, root, COLL_TAG_GATHER, comm, MPI_STATUS_IGNORE);
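/* The zero-byte receive is only a synchronization signal from the root, so no
 * buffer is needed; nullptr also avoids passing the now-const sbuf as a receive
 * buffer. */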
Request::send(sbuf, first_segment_count, sdtype, root, COLL_TAG_GATHER, comm);
 * - Waitall for all the second segments to complete.
*/
char* ptmp;
- MPI_Request *reqs = NULL, first_segment_req;
- reqs = (MPI_Request*)calloc(size, sizeof(MPI_Request));
+ MPI_Request first_segment_req;
+ MPI_Request* reqs = new (std::nothrow) MPI_Request[size];
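/* new (std::nothrow) yields nullptr on allocation failure instead of throwing,
 * so the NULL check below still catches it. */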
if (NULL == reqs) {
ret = -1;
line = __LINE__;
goto error_hndl; }
-
+
typelng=rdtype->size();
rdtype->extent(&lb, &extent);
first_segment_count = rcount;
- COLL_TUNED_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng,
+ COLL_TUNED_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng,
first_segment_count );
for (i = 0; i < size; ++i) {
- if (i == rank) {
+ if (i == rank) {
/* skip myself */
- reqs[i] = MPI_REQUEST_NULL;
- continue;
- }
+ reqs[i] = MPI_REQUEST_NULL;
+ continue;
+ }
/* irecv for the first segment from i */
ptmp = (char*)rbuf + i * rcount * extent;
first_segment_req = Request::irecv(ptmp, first_segment_count, rdtype, i,
COLL_TAG_GATHER, comm
);
-
+
/* send sync message */
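/* This zero-byte message tells rank i that the first-segment irecv above has been
 * posted, so i's blocking send of that segment is always an expected message. */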
Request::send(rbuf, 0, MPI_BYTE, i,
              COLL_TAG_GATHER,
              comm);
/* irecv for the second segment */
ptmp = (char*)rbuf + (i * rcount + first_segment_count) * extent;
- reqs[i]=Request::irecv(ptmp, (rcount - first_segment_count),
+ reqs[i]=Request::irecv(ptmp, (rcount - first_segment_count),
rdtype, i, COLL_TAG_GATHER, comm
);
/* copy local data if necessary */
if (MPI_IN_PLACE != sbuf) {
ret = Datatype::copy(sbuf, scount, sdtype,
- (char*)rbuf + rank * rcount * extent,
+ (char*)rbuf + rank * rcount * extent,
rcount, rdtype);
if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
}
-
+
/* wait all second segments to complete */
ret = Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
- free(reqs);
+ delete[] reqs;
}
/* All done */
return MPI_SUCCESS;
error_hndl:
- XBT_DEBUG(
- "ERROR_HNDL: node %d file %s line %d error %d\n",
+ XBT_DEBUG(
+ "ERROR_HNDL: node %d file %s line %d error %d\n",
rank, __FILE__, line, ret );
return ret;
}
/*
* Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations
- * but for some small number of nodes and/or small data sizes they
- * are just as fast as tuned/tree based segmenting operations
+ * but for some small number of nodes and/or small data sizes they
+ * are just as fast as tuned/tree based segmenting operations
* and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not
* Accepts: - same arguments as MPI_Gather()
* Returns: - MPI_SUCCESS or error code
*/
-int Coll_gather_ompi_basic_linear::gather(void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
+int Coll_gather_ompi_basic_linear::gather(const void* sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount,
MPI_Datatype rdtype, int root, MPI_Comm comm)
{
int i;