-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
-#ifndef NUM_CORE
-#define NUM_CORE 4
-#endif
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
/* Allgather, "loosely synchronized" logical-ring (left-right) algorithm:
 * ranks are grouped by node (num_core ranks per node); data circulates in
 * an inter-node ring overlapped with intra-node exchanges.
 * NOTE(review): this is a diff fragment, not complete source — the i/j loop
 * headers, the offset computations (send_offset, inter_send_offset,
 * inter_recv_offset, src, dst) and the declarations of rcount, rtype,
 * rrequest_count, srequest_count, inter_srequest_array and
 * inter_srequest_count are in elided hunks (rcount/rtype presumably also
 * appear in the elided part of the parameter list — confirm against the
 * full file). Comments below describe only what is visible here. */
int smpi_coll_tuned_allgather_loosely_lr(void *sbuf, int scount,
MPI_Datatype stype, void *rbuf,
MPI_Comm comm)
{
int comm_size, rank;
/* The patch replaces the magic tag 50 with the shared collective tag. */
- int tag = 50;
+ int tag = COLL_TAG_ALLGATHER;
int i, j, send_offset, recv_offset;
int intra_rank, inter_rank, inter_comm_size, intra_comm_size;
int inter_dst, inter_src;
/* Public MPI calls are rewritten to SimGrid's internal smpi_* API. */
- MPI_Comm_size(comm, &comm_size);
- MPI_Comm_rank(comm, &rank);
+ comm_size = smpi_comm_size(comm);
+
/* Lazily build the SMP (per-node) sub-communicators on first use. */
+if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+ smpi_comm_init_smp(comm);
+ }
/* num_core: ranks per node. Falls back to 1 when nodes are not uniform,
 * replacing the old compile-time NUM_CORE=4 macro with a runtime value. */
+ int num_core=1;
+ if (smpi_comm_is_uniform(comm)){
+ num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+ }
+
/* The algorithm requires every node to be fully populated. */
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
+
+ rank = smpi_comm_rank(comm);
MPI_Aint rextent, sextent;
- MPI_Type_extent(rtype, &rextent);
- MPI_Type_extent(stype, &sextent);
+ rextent = smpi_datatype_get_extent(rtype);
+ sextent = smpi_datatype_get_extent(stype);
MPI_Request inter_rrequest;
/* NOTE(review): fixed-size request arrays — presumably overflows for more
 * than 128 pending intra-node exchanges; verify bound against num_core
 * and inter_comm_size in the full file. */
MPI_Request rrequest_array[128];
MPI_Request srequest_array[128];
MPI_Status status;
/* Decompose the flat rank into (node, core-within-node) coordinates. */
- intra_rank = rank % NUM_CORE;
- inter_rank = rank / NUM_CORE;
- inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
- intra_comm_size = NUM_CORE;
+ intra_rank = rank % num_core;
+ inter_rank = rank / num_core;
+ inter_comm_size = (comm_size + num_core - 1) / num_core;
+ intra_comm_size = num_core;
int src_seg, dst_seg;
//copy corresponding message from sbuf to rbuf
recv_offset = rank * rextent * rcount;
/* Self-sendrecv: place this rank's own contribution into its rbuf slot. */
- MPI_Sendrecv(sbuf, scount, stype, rank, tag,
+ smpi_mpi_sendrecv(sbuf, scount, stype, rank, tag,
(char *)rbuf + recv_offset, rcount, rtype, rank, tag, comm, &status);
int dst, src;
/* (elided hunk: enclosing loops over i = inter round, j = intra round,
 *  and the offset/peer computations used below) */
if (intra_rank == j) {
/* Inter-node ring step: only one core per node (the one whose turn it
 * is, intra_rank == j) posts the inter-node transfer for this round.
 * Last round (i == inter_comm_size - 1) has no inter transfer. */
if (i != inter_comm_size - 1) {
/* smpi_mpi_irecv/isend return the request instead of taking MPI_Request*. */
- MPI_Irecv((char *)rbuf + inter_recv_offset, rcount, rtype, inter_src, tag,
- comm, &inter_rrequest);
- MPI_Isend((char *)rbuf + inter_send_offset, scount, stype, inter_dst, tag,
- comm, &inter_srequest_array[inter_srequest_count++]);
-
+ inter_rrequest = smpi_mpi_irecv((char *)rbuf + inter_recv_offset, rcount, rtype,
+ inter_src, tag, comm);
+ inter_srequest_array[inter_srequest_count++] = smpi_mpi_isend((char *)rbuf + inter_send_offset, scount, stype,
+ inter_dst, tag, comm);
}
}
//intra_communication
/* Every core except the current inter-ring owner exchanges segments with
 * its intra-node neighbours (src/dst computed in an elided hunk). */
if (j != intra_rank) {
- MPI_Irecv((char *)rbuf + recv_offset, rcount, rtype, src, tag, comm,
- &rrequest_array[rrequest_count++]);
- MPI_Isend((char *)rbuf + send_offset, scount, stype, dst, tag, comm,
- &srequest_array[srequest_count++]);
+ rrequest_array[rrequest_count++] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount, rtype, src, tag, comm);
+ srequest_array[srequest_count++] = smpi_mpi_isend((char *)rbuf + send_offset, scount, stype, dst, tag, comm);
}
} // intra loop
- // wait for inter communication to finish for these rounds (# of round equals NUM_CORE)
+ // wait for inter communication to finish for these rounds (# of round equals num_core)
/* The inter-node receive must complete before the next ring round may
 * forward the freshly received segment. */
if (i != inter_comm_size - 1) {
- MPI_Wait(&inter_rrequest, &status);
+ smpi_mpi_wait(&inter_rrequest, &status);
}
} //inter loop
/* Drain all outstanding intra-node and inter-node requests before
 * reporting completion. */
- MPI_Waitall(rrequest_count, rrequest_array, MPI_STATUSES_IGNORE);
- MPI_Waitall(srequest_count, srequest_array, MPI_STATUSES_IGNORE);
- MPI_Waitall(inter_srequest_count, inter_srequest_array, MPI_STATUSES_IGNORE);
+ smpi_mpi_waitall(rrequest_count, rrequest_array, MPI_STATUSES_IGNORE);
+ smpi_mpi_waitall(srequest_count, srequest_array, MPI_STATUSES_IGNORE);
+ smpi_mpi_waitall(inter_srequest_count, inter_srequest_array, MPI_STATUSES_IGNORE);
return MPI_SUCCESS;
}