-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
//#include <star-reduction.c>
int reduce_NTSL_segment_size_in_byte = 8192;
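/* the constant above sets the pipeline block size in bytes: messages are cut
   into segments of this size so communication overlaps with the reduction */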
/* Non-topology-specific pipelined linear reduce function */
int smpi_coll_tuned_reduce_NTSL(void *buf, void *rbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
{
- int tag = 50;
+ int tag = COLL_TAG_REDUCE;
MPI_Status status;
MPI_Request *send_request_array;
MPI_Request *recv_request_array;
MPI_Status *send_status_array;
MPI_Status *recv_status_array;
int rank, size;
int i;
MPI_Aint extent;
- MPI_Type_extent(datatype, &extent);
+ extent = smpi_datatype_get_extent(datatype);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
/* source node and destination nodes (same throughout the function) */
int to = (rank - 1 + size) % size;
int from = (rank + 1) % size;

/* segment is the segment size in number of elements (not bytes) */
int segment = reduce_NTSL_segment_size_in_byte / extent;

/* pipeline length: number of full segments */
int pipe_length = count / segment;

/* buffer offset between consecutive segments, in bytes */
int increment = segment * extent;

/* if the input size is not divisible by the segment size, the small
   remainder is handled by the default reduce at the end */
int remainder = count % segment;
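/* Illustration (assumed values, not part of the algorithm): every rank sends
   to (rank - 1 + size) % size and receives from (rank + 1) % size, so with
   size = 4 and root = 0 the reduction chain is 3 -> 2 -> 1 -> 0. Assuming
   MPI_DOUBLE (extent 8), segment = 8192 / 8 = 1024 elements; count = 10000
   then gives pipe_length = 9 full segments and a remainder of 784 elements
   for the fallback reduce at the end. */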
/*
if (root != 0) {
if (rank == root){
- MPI_Send(buf,count,datatype,0,tag,comm);
+ smpi_mpi_send(buf,count,datatype,0,tag,comm);
}
else if (rank == 0) {
- MPI_Recv(buf,count,datatype,root,tag,comm,&status);
+ smpi_mpi_recv(buf,count,datatype,root,tag,comm,&status);
}
}
*/
char *tmp_buf;
- tmp_buf = (char *) malloc(count * extent);
+ tmp_buf = (char *) smpi_get_tmp_sendbuffer(count * extent);
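/* seed rbuf with this rank's own contribution: the send-to-self below copies
   buf into rbuf, which then accumulates the segments received from `from` */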
- MPI_Sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
+ smpi_mpi_sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
tag, comm, &status);
/* when a message is smaller than a block size => no pipeline */
if (count <= segment) {
if (rank == root) {
- MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
- star_reduction(op, tmp_buf, rbuf, &count, &datatype);
+ smpi_mpi_recv(tmp_buf, count, datatype, from, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, rbuf, &count, &datatype);
} else if (rank == ((root - 1 + size) % size)) {
- MPI_Send(rbuf, count, datatype, to, tag, comm);
+ smpi_mpi_send(rbuf, count, datatype, to, tag, comm);
} else {
- MPI_Recv(tmp_buf, count, datatype, from, tag, comm, &status);
- star_reduction(op, tmp_buf, rbuf, &count, &datatype);
- MPI_Send(rbuf, count, datatype, to, tag, comm);
+ smpi_mpi_recv(tmp_buf, count, datatype, from, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, rbuf, &count, &datatype);
+ smpi_mpi_send(rbuf, count, datatype, to, tag, comm);
}
- free(tmp_buf);
+ smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
/* pipeline */
else {
send_request_array =
- (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
+ (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
recv_request_array =
- (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
+ (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
send_status_array =
- (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
+ (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
recv_status_array =
- (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
+ (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
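/* note: (size + pipe_length) slots are allocated although only pipe_length
   requests are posted below; the extra room is harmless */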
/* root recv data */
if (rank == root) {
for (i = 0; i < pipe_length; i++) {
- MPI_Irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
- (tag + i), comm, &recv_request_array[i]);
+ recv_request_array[i] = smpi_mpi_irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
+ (tag + i), comm);
}
for (i = 0; i < pipe_length; i++) {
- MPI_Wait(&recv_request_array[i], &status);
- star_reduction(op, tmp_buf + (i * increment), (char *)rbuf + (i * increment),
+ smpi_mpi_wait(&recv_request_array[i], &status);
+ smpi_op_apply(op, tmp_buf + (i * increment), (char *)rbuf + (i * increment),
&segment, &datatype);
}
}
/* last node only sends data */
else if (rank == ((root - 1 + size) % size)) {
for (i = 0; i < pipe_length; i++) {
- MPI_Isend((char *)rbuf + (i * increment), segment, datatype, to, (tag + i),
- comm, &send_request_array[i]);
+ send_request_array[i] = smpi_mpi_isend((char *)rbuf + (i * increment), segment, datatype, to, (tag + i),
+ comm);
}
- MPI_Waitall((pipe_length), send_request_array, send_status_array);
+ smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
}
/* intermediate nodes relay (receive, reduce, then send) data */
else {
for (i = 0; i < pipe_length; i++) {
- MPI_Irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
- (tag + i), comm, &recv_request_array[i]);
+ recv_request_array[i] = smpi_mpi_irecv((char *) tmp_buf + (i * increment), segment, datatype, from,
+ (tag + i), comm);
}
for (i = 0; i < pipe_length; i++) {
- MPI_Wait(&recv_request_array[i], &status);
- star_reduction(op, tmp_buf + (i * increment), (char *)rbuf + (i * increment),
+ smpi_mpi_wait(&recv_request_array[i], &status);
+ smpi_op_apply(op, tmp_buf + (i * increment), (char *)rbuf + (i * increment),
&segment, &datatype);
- MPI_Isend((char *) rbuf + (i * increment), segment, datatype, to,
- (tag + i), comm, &send_request_array[i]);
+ send_request_array[i] = smpi_mpi_isend((char *) rbuf + (i * increment), segment, datatype, to,
+ (tag + i), comm);
}
- MPI_Waitall((pipe_length), send_request_array, send_status_array);
+ smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
}
free(send_request_array);
free(recv_request_array);
free(send_status_array);
free(recv_status_array);
} /* end pipeline */
/* when count is not divisible by the block size, use the default reduce for the remainder */
if ((remainder != 0) && (count > segment)) {
- MPI_Reduce((char *)buf + (pipe_length * increment),
+ XBT_WARN("MPI_reduce_NTSL use default MPI_reduce.");
+ smpi_mpi_reduce((char *)buf + (pipe_length * increment),
(char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
comm);
}

- free(tmp_buf);
+ smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
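/* Usage sketch (illustrative, not part of this file): applications never call
   this function directly; they call MPI_Reduce and SMPI's collective selector
   dispatches to an implementation such as this one, e.g.

     double in = rank, out = 0.0;
     MPI_Reduce(&in, &out, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

   Assumption: the NTSL variant can be requested through SimGrid's smpi/reduce
   configuration option (exact value per the SMPI documentation). */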