X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/5d308c961af3ea5f561363d2e42b157e8d02b202..e9f0018b823e34405847177b25a85d3facc30ae1:/src/smpi/smpi_coll.c

diff --git a/src/smpi/smpi_coll.c b/src/smpi/smpi_coll.c
index 57c3600a26..96563752f6 100644
--- a/src/smpi/smpi_coll.c
+++ b/src/smpi/smpi_coll.c
@@ -1,6 +1,6 @@
 /* smpi_coll.c -- various optimized routines for collectives */
 
-/* Copyright (c) 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2009-2015. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -11,194 +11,202 @@
 #include <stdio.h>
 
 #include "private.h"
+#include "colls/colls.h"
+#include "simgrid/sg_config.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                 "Logging specific to SMPI (coll)");
 
-struct s_proc_tree {
-  int PROCTREE_A;
-  int numChildren;
-  int *child;
-  int parent;
-  int me;
-  int root;
-  int isRoot;
+s_mpi_coll_description_t mpi_coll_gather_description[] = {
+  {"default",
+   "gather default collective",
+   smpi_mpi_gather},
+COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
 };
-typedef struct s_proc_tree *proc_tree_t;
 
-/**
- * alloc and init
- **/
-static proc_tree_t alloc_tree(int arity)
-{
-  proc_tree_t tree;
-  int i;
-  tree = xbt_new(struct s_proc_tree, 1);
-  tree->PROCTREE_A = arity;
-  tree->isRoot = 0;
-  tree->numChildren = 0;
-  tree->child = xbt_new(int, arity);
-  for (i = 0; i < arity; i++) {
-    tree->child[i] = -1;
-  }
-  tree->root = -1;
-  tree->parent = -1;
-  return tree;
-}
+s_mpi_coll_description_t mpi_coll_allgather_description[] = {
+  {"default",
+   "allgather default collective",
+   smpi_mpi_allgather},
+COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
-/**
- * free
- **/
-static void free_tree(proc_tree_t tree)
-{
-  xbt_free(tree->child);
-  xbt_free(tree);
-}
+s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
+  {"default",
+   "allgatherv default collective",
+   smpi_mpi_allgatherv},
+COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
-/**
- * Build the tree depending on a process rank (index) and the group size (extent)
- * @param index the rank of the calling process
- * @param extent the total number of processes
- **/
-static void build_tree(int index, int extent, proc_tree_t * tree)
-{
-  int places = (*tree)->PROCTREE_A * index;
-  int i, ch, pr;
+s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
+  {"default",
+   "allreduce default collective",
+   smpi_mpi_allreduce},
+COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
-  (*tree)->me = index;
-  (*tree)->root = 0;
-  for (i = 1; i <= (*tree)->PROCTREE_A; i++) {
-    ++places;
-    ch = (*tree)->PROCTREE_A * index + i + (*tree)->root;
-    ch %= extent;
-    if (places < extent) {
-      (*tree)->child[i - 1] = ch;
-      (*tree)->numChildren++;
-    }
-  }
-  if (index == (*tree)->root) {
-    (*tree)->isRoot = 1;
-  } else {
-    (*tree)->isRoot = 0;
-    pr = (index - 1) / (*tree)->PROCTREE_A;
-    (*tree)->parent = pr;
-  }
-}
+s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {
+  {"default",
+   "reduce_scatter default collective",
+   smpi_mpi_reduce_scatter},
+COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
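The added blocks replace SMPI's hand-rolled n-ary-tree machinery with per-collective registries: each mpi_coll_*_description[] is a NULL-terminated array of {name, description, implementation} triples, and the COLL_GATHERS/COLL_ALLGATHERS/... macros from colls/colls.h stamp out one entry per available algorithm. A minimal standalone sketch of the same registry pattern (illustrative names and types only, not SimGrid's actual definitions):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for s_mpi_coll_description_t. */
    typedef struct {
      const char *name;        /* algorithm name used for lookups */
      const char *description; /* human-readable description      */
      int (*fn)(void);         /* pointer to the implementation   */
    } coll_desc_t;

    static int algo_default(void) { return 0; }
    static int algo_bruck(void)   { return 0; }

    /* NULL-terminated registry, like mpi_coll_gather_description[]. */
    static coll_desc_t demo_table[] = {
      {"default", "demo default collective", algo_default},
      {"bruck",   "demo Bruck collective",   algo_bruck},
      {NULL, NULL, NULL}  /* sentinel: scanning loops stop here */
    };

    int main(void)
    {
      const char *wanted = "bruck";
      int i;
      /* Scan until the NULL sentinel, as the lookup code below does. */
      for (i = 0; demo_table[i].name; i++)
        if (!strcmp(wanted, demo_table[i].name))
          printf("%s -> index %d (%s)\n", wanted, i, demo_table[i].description);
      return 0;
    }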
-/**
- * bcast
- **/
-static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
-                       int root, MPI_Comm comm, proc_tree_t tree)
-{
-  int system_tag = 999;  // used negative int but smpi_create_request() declares this illegal (to be checked)
-  int rank, i;
-  MPI_Request *requests;
+s_mpi_coll_description_t mpi_coll_scatter_description[] = {
+  {"default",
+   "scatter default collective",
+   smpi_mpi_scatter},
+COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
-  rank = smpi_comm_rank(comm);
-  /* wait for data from my parent in the tree */
-  if (!tree->isRoot) {
-    DEBUG3("<%d> tree_bcast(): i am not root: recv from %d, tag=%d)",
-           rank, tree->parent, system_tag + rank);
-    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
-                  comm, MPI_STATUS_IGNORE);
-  }
-  requests = xbt_new(MPI_Request, tree->numChildren);
-  DEBUG2("<%d> creates %d requests (1 per child)", rank,
-         tree->numChildren);
-  /* iniates sends to ranks lower in the tree */
-  for (i = 0; i < tree->numChildren; i++) {
-    if (tree->child[i] == -1) {
-      requests[i] = MPI_REQUEST_NULL;
-    } else {
-      DEBUG3("<%d> send to <%d>, tag=%d", rank, tree->child[i],
-             system_tag + tree->child[i]);
-      requests[i] =
-          smpi_isend_init(buf, count, datatype, tree->child[i],
-                          system_tag + tree->child[i], comm);
-    }
-  }
-  smpi_mpi_startall(tree->numChildren, requests);
-  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
-  xbt_free(requests);
-}
+s_mpi_coll_description_t mpi_coll_barrier_description[] = {
+  {"default",
+   "barrier default collective - ompi selector",
+   smpi_coll_tuned_barrier_ompi},
+COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
+s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
+  {"default",
+   "Ompi alltoall default collective",
+   smpi_coll_tuned_alltoall_ompi2},
+COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
+  {"bruck",
+   "Alltoall Bruck (SG) collective",
+   smpi_coll_tuned_alltoall_bruck},
+  {"basic_linear",
+   "Alltoall basic linear (SG) collective",
+   smpi_coll_tuned_alltoall_basic_linear},
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
 
-/**
- * anti-bcast
- **/
-static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
-                           int root, MPI_Comm comm, proc_tree_t tree)
+s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
+  {"default",
+   "Ompi alltoallv default collective",
+   smpi_coll_basic_alltoallv},
+COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
+
+s_mpi_coll_description_t mpi_coll_bcast_description[] = {
+  {"default",
+   "bcast default collective - ompi selector",
+   smpi_coll_tuned_bcast_ompi},
+COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
+
+s_mpi_coll_description_t mpi_coll_reduce_description[] = {
+  {"default",
+   "reduce default collective",
+   smpi_mpi_reduce},
+COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
+  {NULL, NULL, NULL}      /* this array must be NULL terminated */
+};
+
+
+
+/** Displays the long description of all registered models, and quits */
+void coll_help(const char *category, s_mpi_coll_description_t * table)
 {
-  int system_tag = 999;  // used negative int but smpi_create_request() declares this illegal (to be checked)
-  int rank, i;
-  MPI_Request *requests;
+  int i;
+  printf("Long description of the %s models accepted by this simulator:\n",
+         category);
+  for (i = 0; table[i].name; i++)
+    printf("  %s: %s\n", table[i].name, table[i].description);
+}
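coll_help is the help-text hook for these registries: it prints one name/description pair per entry until it hits the NULL sentinel. A hypothetical call site (the actual caller is outside this diff, presumably SMPI's option handling):

    /* Hypothetical usage -- assumed, not shown in this diff: */
    coll_help("alltoall", mpi_coll_alltoall_description);
    /* Prints something like:
     *   Long description of the alltoall models accepted by this simulator:
     *     default: Ompi alltoall default collective
     *     bruck: Alltoall Bruck (SG) collective
     *     ...
     */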
 
-  rank = smpi_comm_rank(comm);
-  // everyone sends to its parent, except root.
-  if (!tree->isRoot) {
-    DEBUG3("<%d> tree_antibcast(): i am not root: send to %d, tag=%d)",
-           rank, tree->parent, system_tag + rank);
-    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
-                  comm);
+int find_coll_description(s_mpi_coll_description_t * table,
+                          char *name, const char *desc)
+{
+  int i;
+  char *name_list = NULL;
+  int selector_on=0;
+  if(name==NULL){//no argument provided, use the active selector's algorithm
+    name=(char*)sg_cfg_get_string("smpi/coll_selector");
+    selector_on=1;
   }
-  //every one receives as many messages as it has children
-  requests = xbt_new(MPI_Request, tree->numChildren);
-  DEBUG2("<%d> creates %d requests (1 per child)", rank,
-         tree->numChildren);
-  for (i = 0; i < tree->numChildren; i++) {
-    if (tree->child[i] == -1) {
-      requests[i] = MPI_REQUEST_NULL;
-    } else {
-      DEBUG3("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
-             system_tag + tree->child[i]);
-      requests[i] =
-          smpi_irecv_init(buf, count, datatype, tree->child[i],
-                          system_tag + tree->child[i], comm);
+  for (i = 0; table[i].name; i++)
+    if (!strcmp(name, table[i].name)) {
+      if (strcmp(table[i].name,"default"))
+        XBT_INFO("Switch to algorithm %s for collective %s",table[i].name,desc);
+      return i;
     }
+
+  if(selector_on){
+    // the collective does not seem to be handled by the active selector, try the default one
+    name=(char*)"default";
+    for (i = 0; table[i].name; i++)
+      if (!strcmp(name, table[i].name)) {
+        return i;
+      }
+  }
+  if (!table[0].name)
+    xbt_die("No collective is valid for '%s'! This is a bug.",name);
+  name_list = xbt_strdup(table[0].name);
+  for (i = 1; table[i].name; i++) {
+    name_list =
+        xbt_realloc(name_list,
+                    strlen(name_list) + strlen(table[i].name) + 3);
+    strcat(name_list, ", ");
+    strcat(name_list, table[i].name);
   }
-  smpi_mpi_startall(tree->numChildren, requests);
-  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
-  xbt_free(requests);
+  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
+  return -1;
 }
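find_coll_description resolves a name to a table index in up to three steps: an explicit name is matched directly; a NULL name is replaced by the smpi/coll_selector configuration value; and if the active selector does not provide the collective, the "default" entry is tried before xbt_die aborts with the list of valid names. The returned index is then used to fill the function pointers declared just below. A hypothetical wiring sketch (the real initialization lives elsewhere in SMPI; the field name coll is assumed from the {name, description, function} initializers above):

    /* Hypothetical init-time wiring -- names and the .coll field are assumed. */
    void smpi_pick_alltoall(char *cfg_value)
    {
      int idx = find_coll_description(mpi_coll_alltoall_description,
                                      cfg_value, "alltoall");
      mpi_coll_alltoall_fun =
          (int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
                   MPI_Comm)) mpi_coll_alltoall_description[idx].coll;
    }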
 
-/**
- * bcast with a binary, ternary, or whatever tree ..
- **/
-void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
-                     MPI_Comm comm, int arity)
-{
-  proc_tree_t tree = alloc_tree(arity);
-  int rank, size;
+int (*mpi_coll_gather_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, int root, MPI_Comm);
+int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
+int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void*, int*, int*, MPI_Datatype, MPI_Comm);
+int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
+int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
+int (*mpi_coll_alltoallv_fun)(void *, int*, int*, MPI_Datatype, void*, int*, int*, MPI_Datatype, MPI_Comm);
+int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
+int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+int (*mpi_coll_reduce_scatter_fun)(void *sbuf, void *rbuf, int *rcounts,MPI_Datatype dtype,MPI_Op op,MPI_Comm comm);
+int (*mpi_coll_scatter_fun)(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,int root, MPI_Comm comm);
+int (*mpi_coll_barrier_fun)(MPI_Comm comm);
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
-  build_tree(rank, size, &tree);
-  tree_bcast(buf, count, datatype, root, comm, tree);
-  free_tree(tree);
-}
 
-/**
- * barrier with a binary, ternary, or whatever tree ..
- **/
-void nary_tree_barrier(MPI_Comm comm, int arity)
+int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount,
+                                   MPI_Datatype sendtype, void *recvbuf,
+                                   int recvcount, MPI_Datatype recvtype,
+                                   MPI_Comm comm)
 {
-  proc_tree_t tree = alloc_tree(arity);
-  int rank, size;
-  char dummy = '$';
-
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
-  build_tree(rank, size, &tree);
-  tree_antibcast(&dummy, 1, MPI_CHAR, 0, comm, tree);
-  tree_bcast(&dummy, 1, MPI_CHAR, 0, comm, tree);
-  free_tree(tree);
+  int size, sendsize;
+  size = smpi_comm_size(comm);
+  sendsize = smpi_datatype_size(sendtype) * sendcount;
+  if (sendsize < 200 && size > 12) {
+    return
+        smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
+                                       recvbuf, recvcount, recvtype,
+                                       comm);
+  } else if (sendsize < 3000) {
+    return
+        smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount,
+                                              sendtype, recvbuf,
+                                              recvcount, recvtype, comm);
+  } else {
+    return
+        smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype,
+                                      recvbuf, recvcount, recvtype,
+                                      comm);
+  }
 }
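smpi_coll_tuned_alltoall_ompi2 reproduces Open MPI's size-based selection: payloads under 200 bytes per peer on more than 12 ranks go to Bruck, payloads under 3000 bytes to basic_linear, and everything else to the ring algorithm. A standalone check of just the decision logic (plain C, no SMPI types):

    #include <stdio.h>

    /* Mirrors the branch structure of smpi_coll_tuned_alltoall_ompi2. */
    static const char *pick_alltoall(int sendsize, int comm_size)
    {
      if (sendsize < 200 && comm_size > 12)
        return "bruck";
      else if (sendsize < 3000)
        return "basic_linear";
      else
        return "ring";
    }

    int main(void)
    {
      printf("%s\n", pick_alltoall(10 * 4, 16)); /* 40 B x 16 ranks -> bruck */
      printf("%s\n", pick_alltoall(500, 8));     /* 500 B -> basic_linear    */
      printf("%s\n", pick_alltoall(65536, 8));   /* 64 KiB -> ring           */
      return 0;
    }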
 
 /**
  * Alltoall Bruck
  *
  * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
+ * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
+ * less...
  **/
 int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                    MPI_Datatype sendtype, void *recvbuf,
@@ -208,20 +216,21 @@ int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
   int system_tag = 777;
   int i, rank, size, err, count;
   MPI_Aint lb;
-  MPI_Aint sendextent = 0;
-  MPI_Aint recvextent = 0;
+  MPI_Aint sendext = 0;
+  MPI_Aint recvext = 0;
   MPI_Request *requests;
 
   // FIXME: check implementation
   rank = smpi_comm_rank(comm);
   size = smpi_comm_size(comm);
-  DEBUG1("<%d> algorithm alltoall_bruck() called.", rank);
-  err = smpi_datatype_extent(sendtype, &lb, &sendextent);
-  err = smpi_datatype_extent(recvtype, &lb, &recvextent);
+  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
+  smpi_datatype_extent(sendtype, &lb, &sendext);
+  smpi_datatype_extent(recvtype, &lb, &recvext);
   /* Local copy from self */
   err =
-      smpi_datatype_copy(&((char *) sendbuf)[rank * sendextent], sendcount,
-                         sendtype, &((char *) recvbuf)[rank * recvextent],
+      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
+                         sendcount, sendtype,
+                         (char *)recvbuf + rank * recvcount * recvext,
                          recvcount, recvtype);
   if (err == MPI_SUCCESS && size > 1) {
     /* Initiate all send/recv to/from others. */
@@ -230,38 +239,41 @@ int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
     /* Create all receives that will be posted first */
     for (i = 0; i < size; ++i) {
       if (i == rank) {
-        DEBUG3("<%d> skip request creation [src = %d, recvcount = %d]",
+        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                rank, i, recvcount);
         continue;
       }
       requests[count] =
-          smpi_irecv_init(&((char *) recvbuf)[i * recvextent], recvcount,
+          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                           recvtype, i, system_tag, comm);
       count++;
     }
     /* Now create all sends */
     for (i = 0; i < size; ++i) {
       if (i == rank) {
-        DEBUG3("<%d> skip request creation [dst = %d, sendcount = %d]",
+        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                rank, i, sendcount);
         continue;
       }
       requests[count] =
-          smpi_isend_init(&((char *) sendbuf)[i * sendextent], sendcount,
+          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                           sendtype, i, system_tag, comm);
       count++;
     }
     /* Wait for them all. */
     smpi_mpi_startall(count, requests);
-    DEBUG2("<%d> wait for %d requests", rank, count);
+    XBT_DEBUG("<%d> wait for %d requests", rank, count);
     smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
+    for(i = 0; i < count; i++) {
+      if(requests[i]!=MPI_REQUEST_NULL) smpi_mpi_request_free(&requests[i]);
+    }
     xbt_free(requests);
   }
   return MPI_SUCCESS;
 }
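The recurring change in these hunks is the buffer arithmetic: block offsets are now computed as (char *)buf + rank * count * extent. The extent reported by smpi_datatype_extent is per element, so the old rank * sendextent indexing in the Bruck routine dropped the count factor whenever more than one element went to each peer (the basic_linear routine below had folded it into sendinc instead); the new form spells the multiplication out at every use. A small standalone illustration of the corrected arithmetic (plain C; the 4-byte extent stands in for what smpi_datatype_extent would report for MPI_INT):

    #include <stdio.h>

    int main(void)
    {
      int  count = 10;  /* elements sent to each peer           */
      long ext   = 4;   /* per-element extent, e.g. sizeof(int) */
      int  i;

      /* Block i (the slice for peer i) starts i * count * ext bytes in. */
      for (i = 0; i < 4; i++)
        printf("peer %d: byte offset %ld\n", i, i * count * ext);
      return 0;
    }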
 
 /**
- * Alltoall basic_linear
+ * Alltoall basic_linear (STARMPI:alltoall-simple)
  **/
 int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                           MPI_Datatype sendtype,
@@ -271,24 +283,20 @@ int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
 {
   int system_tag = 888;
   int i, rank, size, err, count;
-  MPI_Aint lb;
-  MPI_Aint sendinc = 0;
-  MPI_Aint recvinc = 0;
+  MPI_Aint lb = 0, sendext = 0, recvext = 0;
   MPI_Request *requests;
 
   /* Initialize. */
   rank = smpi_comm_rank(comm);
   size = smpi_comm_size(comm);
-  DEBUG1("<%d> algorithm alltoall_basic_linear() called.", rank);
-  err = smpi_datatype_extent(sendtype, &lb, &sendinc);
-  err = smpi_datatype_extent(recvtype, &lb, &recvinc);
-  sendinc *= sendcount;
-  recvinc *= recvcount;
+  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
+  smpi_datatype_extent(sendtype, &lb, &sendext);
+  smpi_datatype_extent(recvtype, &lb, &recvext);
   /* simple optimization */
-  err =
-      smpi_datatype_copy(&((char *) sendbuf)[rank * sendinc], sendcount,
-                         sendtype, &((char *) recvbuf)[rank * recvinc],
-                         recvcount, recvtype);
+  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
+                           sendcount, sendtype,
+                           (char *)recvbuf + rank * recvcount * recvext,
+                           recvcount, recvtype);
   if (err == MPI_SUCCESS && size > 1) {
     /* Initiate all send/recv to/from others. */
     requests = xbt_new(MPI_Request, 2 * (size - 1));
@@ -296,7 +304,7 @@ int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
     count = 0;
     for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
       requests[count] =
-          smpi_irecv_init(&((char *) recvbuf)[i * recvinc], recvcount,
+          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                           recvtype, i, system_tag, comm);
       count++;
     }
@@ -305,60 +313,24 @@ int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
      * when messages actually arrive in the order in which they were posted.
      * TODO: check the previous assertion
      */
-    for (i = (rank + size - 1) % size; i != rank;
-         i = (i + size - 1) % size) {
+    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
       requests[count] =
-          smpi_isend_init(&((char *) sendbuf)[i * sendinc], sendcount,
+          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                           sendtype, i, system_tag, comm);
       count++;
     }
     /* Wait for them all. */
     smpi_mpi_startall(count, requests);
-    DEBUG2("<%d> wait for %d requests", rank, count);
+    XBT_DEBUG("<%d> wait for %d requests", rank, count);
     smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
+    for(i = 0; i < count; i++) {
+      if(requests[i]!=MPI_REQUEST_NULL) smpi_mpi_request_free(&requests[i]);
+    }
     xbt_free(requests);
   }
   return err;
 }
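Both alltoall implementations now release their requests after smpi_mpi_waitall. Requests built with smpi_isend_init/smpi_irecv_init are persistent, so completing them does not destroy them, and the old code leaked one request object per peer on every call. The same init/start/wait/free lifecycle in standard MPI terms (a sketch of the semantics SMPI mirrors internally, not of SMPI's own API):

    #include <mpi.h>

    int main(int argc, char *argv[])
    {
      int rank, size;
      char out[8] = "ping", in[8];
      MPI_Request reqs[2];

      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      /* Persistent requests: *_init allocates them; waiting does not free them. */
      MPI_Send_init(out, 8, MPI_CHAR, (rank + 1) % size, 0,
                    MPI_COMM_WORLD, &reqs[0]);
      MPI_Recv_init(in, 8, MPI_CHAR, (rank + size - 1) % size, 0,
                    MPI_COMM_WORLD, &reqs[1]);

      MPI_Startall(2, reqs);
      MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);

      /* The step this diff adds to the collectives: free the requests. */
      MPI_Request_free(&reqs[0]);
      MPI_Request_free(&reqs[1]);

      MPI_Finalize();
      return 0;
    }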
 
-/**
- * Alltoall pairwise
- *
- * this algorithm performs size steps (1<=s<=size) and
- * at each step s, a process p sends iand receive to.from a unique distinct remote process
- * size=5 : s=1:  4->0->1, 0->1->2, 1->2->3, ...
- *          s=2:  3->0->2, 4->1->3, 0->2->4, 1->3->0 , 2->4->1
- * ....
- * Openmpi calls this routine when the message size sent to each rank is greater than 3000 bytes
- **/
-int smpi_coll_tuned_alltoall_pairwise(void *sendbuf, int sendcount,
-                                      MPI_Datatype sendtype, void *recvbuf,
-                                      int recvcount, MPI_Datatype recvtype,
-                                      MPI_Comm comm)
-{
-  int system_tag = 999;
-  int rank, size, step, sendto, recvfrom, sendsize, recvsize;
-
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
-  DEBUG1("<%d> algorithm alltoall_pairwise() called.", rank);
-  sendsize = smpi_datatype_size(sendtype);
-  recvsize = smpi_datatype_size(recvtype);
-  /* Perform pairwise exchange - starting from 1 so the local copy is last */
-  for (step = 1; step < size + 1; step++) {
-    /* who do we talk to in this step? */
-    sendto = (rank + step) % size;
-    recvfrom = (rank + size - step) % size;
-    /* send and receive */
-    smpi_mpi_sendrecv(&((char *) sendbuf)[sendto * sendsize * sendcount],
-                      sendcount, sendtype, sendto, system_tag,
-                      &((char *) recvbuf)[recvfrom * recvsize * recvcount],
-                      recvcount, recvtype, recvfrom, system_tag, comm,
-                      MPI_STATUS_IGNORE);
-  }
-  return MPI_SUCCESS;
-}
-
 int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                               int *senddisps, MPI_Datatype sendtype,
                               void *recvbuf, int *recvcounts,
@@ -367,22 +339,20 @@
 {
   int system_tag = 889;
   int i, rank, size, err, count;
-  MPI_Aint lb;
-  MPI_Aint sendextent = 0;
-  MPI_Aint recvextent = 0;
+  MPI_Aint lb = 0, sendext = 0, recvext = 0;
   MPI_Request *requests;
 
   /* Initialize. */
   rank = smpi_comm_rank(comm);
   size = smpi_comm_size(comm);
-  DEBUG1("<%d> algorithm basic_alltoallv() called.", rank);
-  err = smpi_datatype_extent(sendtype, &lb, &sendextent);
-  err = smpi_datatype_extent(recvtype, &lb, &recvextent);
+  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
+  smpi_datatype_extent(sendtype, &lb, &sendext);
+  smpi_datatype_extent(recvtype, &lb, &recvext);
   /* Local copy from self */
   err =
-      smpi_datatype_copy(&((char *) sendbuf)[senddisps[rank] * sendextent],
+      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                          sendcounts[rank], sendtype,
-                         &((char *) recvbuf)[recvdisps[rank] * recvextent],
+                         (char *)recvbuf + recvdisps[rank] * recvext,
                          recvcounts[rank], recvtype);
   if (err == MPI_SUCCESS && size > 1) {
     /* Initiate all send/recv to/from others. */
@@ -391,33 +361,36 @@ int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
     /* Create all receives that will be posted first */
     for (i = 0; i < size; ++i) {
       if (i == rank || recvcounts[i] == 0) {
-        DEBUG3
+        XBT_DEBUG
             ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
              rank, i, recvcounts[i]);
         continue;
       }
       requests[count] =
-          smpi_irecv_init(&((char *) recvbuf)[recvdisps[i] * recvextent],
+          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                           recvcounts[i], recvtype, i, system_tag, comm);
       count++;
     }
     /* Now create all sends */
     for (i = 0; i < size; ++i) {
       if (i == rank || sendcounts[i] == 0) {
-        DEBUG3
+        XBT_DEBUG
             ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
              rank, i, sendcounts[i]);
         continue;
       }
       requests[count] =
-          smpi_isend_init(&((char *) sendbuf)[senddisps[i] * sendextent],
+          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                           sendcounts[i], sendtype, i, system_tag, comm);
       count++;
     }
     /* Wait for them all. */
     smpi_mpi_startall(count, requests);
-    DEBUG2("<%d> wait for %d requests", rank, count);
+    XBT_DEBUG("<%d> wait for %d requests", rank, count);
     smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
+    for(i = 0; i < count; i++) {
+      if(requests[i]!=MPI_REQUEST_NULL) smpi_mpi_request_free(&requests[i]);
+    }
     xbt_free(requests);
   }
   return err;
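smpi_coll_basic_alltoallv gets the same treatment as the fixed-size variants: XBT_DEBUG logging, extent-based pointer arithmetic, and the request-freeing loop. The vector flavor differs only in its addressing: senddisps/recvdisps are element displacements, so the byte offset of the block for peer i is disps[i] * extent, with no count factor because the displacement already locates the start of the block. A small standalone illustration (plain C, illustrative values):

    #include <stdio.h>

    int main(void)
    {
      /* Per-peer element counts and displacements, as MPI_Alltoallv takes them. */
      int  sendcounts[4] = {1, 2, 3, 4};
      int  senddisps[4]  = {0, 1, 3, 6}; /* prefix sums of sendcounts  */
      long ext           = 4;            /* per-element extent in bytes */
      int  i;

      for (i = 0; i < 4; i++)
        printf("peer %d: %d element(s) starting at byte %ld\n",
               i, sendcounts[i], senddisps[i] * ext);
      return 0;
    }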