+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
#include "colls_private.h"
/* IMPLEMENTED BY PITCH PATARASUK
   Non-topology-specific (however, the number of cores per node needs to be changed)
   inter-communication.
   The communication is done in a pipeline fashion. */
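+/* Rough reading of the structure below, not an authoritative description:
+ * num_core consecutive ranks are assumed to share one SMP node
+ * (see intra_rank = rank % num_core), and the buffer is cut into segments
+ * so the phases of the main loop can overlap in a pipeline. */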
-/* change number of core per smp-node
- we assume that number of core per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
+
/* this is a default segment size for pipelining,
but it is typically passed as a command line argument */
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = simcall_host_get_core(SIMIX_host_self());
- // do we use the default one or the number of cores in the platform ?
- // if the number of cores is one, the platform may be simulated with 1 node = 1 core
- if (num_core == 1) num_core = NUM_CORE;
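+  /* Instead of a hard-coded NUM_CORE, rely on SimGrid's SMP awareness:
+   * lazily build the intra-node/leader communicators if they do not exist yet,
+   * then take the per-node core count from the intra-node communicator, but
+   * only when every node hosts the same number of processes (uniform case).
+   * Otherwise keep num_core = 1, i.e. treat each process as its own node. */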
+  if (smpi_comm_get_leaders_comm(comm) == MPI_COMM_NULL) {
+    smpi_comm_init_smp(comm);
+  }
+  int num_core = 1;
+  if (smpi_comm_is_uniform(comm)) {
+    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+  }
comm_size = smpi_comm_size(comm);
rank = smpi_comm_rank(comm);
MPI_Aint extent;
extent = smpi_datatype_get_extent(dtype);
- tmp_buf = (void *) xbt_malloc(count * extent);
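+  /* The temporary buffer now comes from SMPI's temp-buffer helper and is
+   * released with smpi_free_tmp_buffer() below; depending on SMPI's
+   * memory-saving settings this may return a recycled buffer rather than a
+   * fresh allocation (an assumption about the helper, not shown here). */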
+ tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
int intra_rank, inter_rank;
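+  /* intra_rank: this process' position inside its SMP node, assuming ranks
+   * are laid out node by node; inter_rank presumably identifies the node. */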
intra_rank = rank % num_core;
}
} // for phase
- free(tmp_buf);
+ smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}