1 /* Copyright (c) 2013-2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "colls_private.h"
/* SMP-aware MPI_Allgather: three phases — (1) intra-node ring-style allgather,
 * (2) inter-node exchange between the rank-0 "leader" of each SMP node using
 * nonblocking irecv/isend pairs, (3) intra-node flat-tree broadcast of the
 * fully assembled buffer from each leader to its local ranks.
 * NOTE(review): this chunk is incomplete — several argument lists and closing
 * braces lie outside the visible span; comments below only state what the
 * visible lines establish. */
9 int smpi_coll_tuned_allgather_smp_simple(void *send_buf, int scount,
10 MPI_Datatype stype, void *recv_buf,
11 int rcount, MPI_Datatype rtype,
14 int src, dst, comm_size, rank;
15 comm_size = smpi_comm_size(comm);
/* Lazily build the SMP structure (leader/intra communicators) on first use. */
17 if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
18 smpi_comm_init_smp(comm);
/* When every node hosts the same number of processes, take the per-node
 * process count from the intra-node communicator. */
21 if (smpi_comm_is_uniform(comm)){
22 num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
/* Algorithm requires comm_size to be a multiple of the per-node core count. */
25 if(comm_size%num_core)
26 THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", num_core);
28 rank = smpi_comm_rank(comm);
29 MPI_Aint rextent, sextent;
30 rextent = smpi_datatype_get_extent(rtype);
31 sextent = smpi_datatype_get_extent(stype);
32 int tag = COLL_TAG_ALLGATHER;
34 int i, send_offset, recv_offset;
35 int intra_rank, inter_rank;
/* Map the global rank onto (node, core-within-node) coordinates. */
36 intra_rank = rank % num_core;
37 inter_rank = rank / num_core;
38 int inter_comm_size = (comm_size + num_core - 1) / num_core;
39 int num_core_in_current_smp = num_core;
41 // the last SMP node may have fewer number of running processes than all others
42 if (inter_rank == (inter_comm_size - 1)) {
43 num_core_in_current_smp = comm_size - (inter_rank * num_core);
/* INTRA-SMP allgather: first copy own contribution into its slot via a
 * self-sendrecv, then exchange with the other local ranks.
 * NOTE(review): offsets are int; rank*extent*count could overflow for large
 * buffers — TODO confirm upstream uses MPI_Aint arithmetic here. */
46 recv_offset = rank * rextent * rcount;
47 smpi_mpi_sendrecv(send_buf, scount, stype, rank, tag,
48 ((char *) recv_buf + recv_offset), rcount, rtype, rank, tag,
/* Ring-style exchange among the local ranks: at step i, send to the local
 * rank i ahead and receive from the local rank i behind (modulo the number
 * of ranks actually on this node). */
50 for (i = 1; i < num_core_in_current_smp; i++) {
53 (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
55 (inter_rank * num_core) + (intra_rank - i +
56 num_core_in_current_smp) %
57 (num_core_in_current_smp);
58 recv_offset = src * rextent * rcount;
60 smpi_mpi_sendrecv(send_buf, scount, stype, dst, tag,
61 ((char *) recv_buf + recv_offset), rcount, rtype, src, tag,
66 // INTER-SMP-ALLGATHER
67 // Every root of each SMP node post INTER-Sendrecv, then do INTRA-Bcast for each receiving message
/* Only node leaders (intra_rank == 0) take part in the inter-node phase. */
71 if (intra_rank == 0) {
72 MPI_Request *reqs, *req_ptr;
/* One irecv and one isend per remote node. */
73 int num_req = (inter_comm_size - 1) * 2;
74 reqs = (MPI_Request *) xbt_malloc(num_req * sizeof(MPI_Request));
77 stat = (MPI_Status *) xbt_malloc(num_req * sizeof(MPI_Status));
/* Post all receives first: each pulls the num_core-sized chunk owned by the
 * leader of another node (leaders sit at global rank node*num_core).
 * NOTE(review): recv_offset is computed with sextent here rather than
 * rextent — TODO confirm this matches the original file (harmless when
 * stype == rtype, wrong otherwise). */
79 for (i = 1; i < inter_comm_size; i++) {
81 //dst = ((inter_rank+i)%inter_comm_size) * num_core;
82 src = ((inter_rank - i + inter_comm_size) % inter_comm_size) * num_core;
83 //send_offset = (rank * sextent * scount);
84 recv_offset = (src * sextent * scount);
85 // smpi_mpi_sendrecv((recv_buf+send_offset), (scount * num_core), stype, dst, tag,
86 // (recv_buf+recv_offset), (rcount * num_core), rtype, src, tag, comm, &status);
87 //MPIC_Isend((recv_buf+send_offset), (scount * num_core), stype, dst, tag, comm, req_ptr++);
88 *(req_ptr++) = smpi_mpi_irecv(((char *) recv_buf + recv_offset), (rcount * num_core), rtype,
/* Then post the matching sends: push this node's assembled chunk to every
 * other node's leader. */
91 for (i = 1; i < inter_comm_size; i++) {
93 dst = ((inter_rank + i) % inter_comm_size) * num_core;
94 //src = ((inter_rank-i+inter_comm_size)%inter_comm_size) * num_core;
95 send_offset = (rank * sextent * scount);
96 //recv_offset = (src * sextent * scount);
97 // smpi_mpi_sendrecv((recv_buf+send_offset), (scount * num_core), stype, dst, tag,
98 // (recv_buf+recv_offset), (rcount * num_core), rtype, src, tag, comm, &status);
99 *(req_ptr++) = smpi_mpi_isend(((char *) recv_buf + send_offset), (scount * num_core), stype,
101 //MPIC_Irecv((recv_buf+recv_offset), (rcount * num_core), rtype, src, tag, comm, req_ptr++);
/* Block until every inter-node transfer completes.
 * NOTE(review): reqs/stat are xbt_malloc'ed above; the matching frees are
 * outside this visible span — verify they exist to avoid a leak. */
103 smpi_mpi_waitall(num_req, reqs, stat);
108 //INTRA-BCAST (use flat tree)
/* Leader sends the whole comm_size-sized result to each local rank; every
 * non-leader receives it from its node leader (global rank inter_rank*num_core). */
110 if (intra_rank == 0) {
111 for (i = 1; i < num_core_in_current_smp; i++) {
112 //printf("rank = %d, num = %d send to %d\n",rank, num_core_in_current_smp, (rank + i));
113 smpi_mpi_send(recv_buf, (scount * comm_size), stype, (rank + i), tag, comm);
116 //printf("rank = %d recv from %d\n",rank, (inter_rank * num_core));
117 smpi_mpi_recv(recv_buf, (rcount * comm_size), rtype, (inter_rank * num_core),