#include "simgrid/sg_config.h"
#include "colls/colls.h"
-#include "src/simix/SynchroComm.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
-static int match_recv(void* a, void* b, smx_synchro_t ignored) {
+static int match_recv(void* a, void* b, smx_activity_t ignored) {
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
}else return 0;
}
-static int match_send(void* a, void* b,smx_synchro_t ignored) {
+static int match_send(void* a, void* b,smx_activity_t ignored) {
MPI_Request ref = static_cast<MPI_Request>(a);
MPI_Request req = static_cast<MPI_Request>(b);
XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
// These are taken from surf/network.c and generalized to have more values for each factor
typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
- long factor=0;
- std::vector<double> values; /** We allocate arbitrarily 4 elements **/
+ size_t factor=0;
+ std::vector<double> values;
} s_smpi_factor_multival_t;
std::vector<s_smpi_factor_multival_t> smpi_os_values;
static simgrid::config::Flag<double> smpi_test_sleep(
"smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
-static int factor_cmp(const s_smpi_factor_multival_t& pa, const s_smpi_factor_multival_t& pb)
-{
- return (pa.factor > pb.factor) ? 1 :
- (pa.factor < pb.factor) ? -1 : 0;
-}
-
static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
{
std::vector<s_smpi_factor_multival_t> smpi_factor;
- s_smpi_factor_multival_t fact;
/** Setup the tokenizer that parses the string **/
typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
/** Iterate over all the parsed tokens, convert them to the right type, and store them into smpi_factor **/
for (Tokenizer::iterator token_iter = tokens.begin();
token_iter != tokens.end(); token_iter++) {
+ XBT_DEBUG("token: %s", token_iter->c_str());
Tokenizer factor_values(*token_iter, factor_separator);
-
+ s_smpi_factor_multival_t fact;
if (factor_values.begin() == factor_values.end()) {
xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
}
}
else {
errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
- fact.values.push_back(xbt_str_parse_double((*factor_iter).c_str(), errmsg));
+ fact.values.push_back(xbt_str_parse_double(factor_iter->c_str(), errmsg));
}
xbt_free(errmsg);
}
smpi_factor.push_back(fact);
- XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
+ XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
}
- std::sort(smpi_factor.begin(), smpi_factor.end(), &factor_cmp);
+ std::sort(smpi_factor.begin(), smpi_factor.end(),
+ [](const s_smpi_factor_multival_t &pa,
+ const s_smpi_factor_multival_t &pb) {
+ return (pa.factor < pb.factor);
+ });
for (auto& fact : smpi_factor) {
- XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
+ XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
}
return smpi_factor;
}
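+ // Illustration of the accepted format (hypothetical values, not the library
+ // defaults): "0:1e-6:2e-9;65536:5e-6:1e-9" is split on ';' into two chunks,
+ // and each chunk on ':' into a factor (the interval boundary) followed by
+ // its values, giving the segments {0, [1e-6, 2e-9]} and {65536, [5e-6, 1e-9]}.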
-static double smpi_os(double size)
+static double smpi_os(size_t size)
{
if (smpi_os_values.empty()) {
smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
}
- double current=0.0;
+ double current = smpi_os_values.empty() ? 0.0 : smpi_os_values.front().values[0] + smpi_os_values.front().values[1] * size;
// Iterate over all the sections that were specified and find the right
// value. (fact.factor represents the interval boundaries; we want the
// last section whose fact.factor is still smaller than size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
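+ // Worked example (same hypothetical segments as above): for size = 4096,
+ // the first segment (factor 0) sets current = 1e-6 + 2e-9*4096, then the
+ // second one (factor 65536) satisfies 4096 <= 65536 and returns that value;
+ // for size = 1048576 no boundary is large enough, so the value of the last
+ // segment, 5e-6 + 1e-9*1048576, is returned after the loop.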
for (auto& fact : smpi_os_values) {
if (size <= fact.factor) { // Values already too large, use the previously
// computed value of current!
- XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
current = fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("Searching for smpi/os: %f is larger than the largest boundary, return %f", size, current);
+ XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
return current;
}
-static double smpi_ois(double size)
+static double smpi_ois(size_t size)
{
if (smpi_ois_values.empty()) {
smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
}
- double current=0.0;
+ double current = smpi_ois_values.empty() ? 0.0 : smpi_ois_values.front().values[0] + smpi_ois_values.front().values[1] * size;
// Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
// boundaries; we want the last section whose fact.factor is still smaller than size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
for (auto& fact : smpi_ois_values) {
if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
current = fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("Searching for smpi/ois: %f is larger than the largest boundary, return %f", size, current);
+ XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
return current;
}
-static double smpi_or(double size)
+static double smpi_or(size_t size)
{
if (smpi_or_values.empty()) {
smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
}
- double current=0.0;
+
+ double current = smpi_or_values.empty() ? 0.0 : smpi_or_values.front().values[0] + smpi_or_values.front().values[1] * size;
+
// Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
// boundaries; we want the last section whose fact.factor is still smaller than size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
- for (auto fact : smpi_or_values) {
- if (size <= fact.factor) { // Values already too large, use the previously
- // computed value of current!
- XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
+ for (auto& fact : smpi_or_values) {
+ if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+ XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
} else {
// If the next section is too large, the current section must be used.
current=fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("smpi_or: %f is larger than largest boundary, return %f", size, current);
+ XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
return current;
}
double smpi_mpi_wtime(){
double time;
- if (smpi_process_initialized() != 0 &&
- smpi_process_finalized() == 0 &&
- smpi_process_get_sampling() == 0) {
+ if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) {
smpi_bench_end();
time = SIMIX_get_clock();
// to avoid deadlocks if used as a break condition, such as
// begin with the more appropriate one: the small one.
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
if (action == nullptr) {
mailbox = smpi_process_mailbox();
else {
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
if (action == nullptr) {
XBT_DEBUG("No, nothing in the permanent receive mailbox");
else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
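+ // Eager mode, as far as visible here: first look for a matching recv already
+ // posted in the receiver's large mailbox; if none is found and the send is
+ // not synchronous (no SSEND flag), fall back to the small mailbox below.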
mailbox = smpi_process_remote_mailbox(receiver);
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
if (action == nullptr) {
if ((request->flags & SSEND) == 0){
mailbox = smpi_process_remote_mailbox_small(receiver);
int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
- xbt_dynar_t comms;
+ std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
+ comms.reserve(count);
+
int i;
- int* map;
int flag = 0;
- int size = 0;
*index = MPI_UNDEFINED;
- comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
- map = xbt_new(int, count);
+
+ std::vector<int> map; /** Maps all matching comms back to their location in requests **/
for(i = 0; i < count; i++) {
if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
- xbt_dynar_push(comms, &requests[i]->action);
- map[size] = i;
- size++;
+ comms.push_back(requests[i]->action);
+ map.push_back(i);
}
}
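+ // Example: if only requests[1] and requests[3] are active, comms holds their
+ // two actions and map = {1, 3}, so a match on comms[0] is reported back as
+ // *index = 1.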
- if(size > 0) {
+ if(!map.empty()) {
// Multiplier for the sleep time: each failed testany increases it, which speeds up the execution of polling loops
static int nsleeps = 1;
if(smpi_test_sleep > 0)
simcall_process_sleep(nsleeps*smpi_test_sleep);
- i = simcall_comm_testany(comms);
- // not MPI_UNDEFINED, as this is a simix return code
- if(i != -1) {
- *index = map[i];
+ i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
+ if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
+ *index = map[i];
finish_wait(&requests[*index], status);
- if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
- requests[*index] = MPI_REQUEST_NULL;
- flag = 1;
- nsleeps=1;
- }else{
+ flag = 1;
+ nsleeps = 1;
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
+ requests[*index] = MPI_REQUEST_NULL;
+ }
+ } else {
nsleeps++;
}
- }else{
+ } else {
//all requests are null or inactive, return true
- flag=1;
+ flag = 1;
smpi_empty_status(status);
}
- xbt_free(map);
- xbt_dynar_free(&comms);
return flag;
}
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
int flag=1;
- int i;
- for(i=0; i<count; i++){
+ for(int i=0; i<count; i++){
if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
if (smpi_mpi_test(&requests[i], pstat)!=1){
flag=0;
}
if (request->action != nullptr){
- simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
- MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+ simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
+ MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
*flag = 1;
if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
if(count > 0) {
// Wait for a request to complete
- comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
+ comms = xbt_dynar_new(sizeof(smx_activity_t), nullptr);
map = xbt_new(int, count);
XBT_DEBUG("Wait for one of %d", count);
for(i = 0; i < count; i++) {
}
}
if(size > 0) {
- i = simcall_comm_waitany(comms);
+ i = simcall_comm_waitany(comms, -1);
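+ // -1 is the new timeout argument; a negative value means "no time limit"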
// not MPI_UNDEFINED, as this is a simix return code
if (i != -1) {
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_SCATTER;
- int rank, size, dst, index;
- MPI_Aint lb = 0, sendext = 0;
+ int dst;
+ MPI_Aint lb = 0;
+ MPI_Aint sendext = 0;
MPI_Request *requests;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
if(rank != root) {
// Recv buffer from root
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
}
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
- index = 0;
+ int index = 0;
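+ // Each destination rank dst gets the chunk starting dst * sendcount elements
+ // (i.e. dst * sendcount * sendext bytes, sendext being the datatype extent)
+ // into sendbuf.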
for(dst = 0; dst < size; dst++) {
if(dst != root) {
requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_SCATTERV;
- int rank, size, dst, index;
- MPI_Aint lb = 0, sendext = 0;
+ int dst;
+ MPI_Aint lb = 0;
+ MPI_Aint sendext = 0;
MPI_Request *requests;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
if(rank != root) {
// Recv buffer from root
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
}
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
- index = 0;
+ int index = 0;
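+ // Unlike plain scatter, each chunk starts at a caller-provided displacement:
+ // displs[dst] elements (displs[dst] * sendext bytes) into sendbuf.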
for(dst = 0; dst < size; dst++) {
if(dst != root) {
requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
MPI_Comm comm)
{
int system_tag = COLL_TAG_REDUCE;
- int rank, size, src, index;
- MPI_Aint lb = 0, dataext = 0;
+ int src, index;
+ MPI_Aint lb = 0;
+ MPI_Aint dataext = 0;
MPI_Request *requests;
void **tmpbufs;
char* sendtmpbuf = static_cast<char *>(sendbuf);
-
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
// non-commutative case: use a working algorithm from OpenMPI
if(!smpi_op_is_commute(op)){
smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int system_tag = -888;
- int rank, size, other, index;
+ int other, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
smpi_datatype_extent(datatype, &lb, &dataext);
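+ // MPI_Scan semantics: rank r receives op applied over the send buffers of
+ // ranks 0..r inclusive, so every rank also contributes its own data.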
void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int system_tag = -888;
- int rank, size, other, index;
+ int other, index;
MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
int recvbuf_is_empty=1;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
smpi_datatype_extent(datatype, &lb, &dataext);
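+ // MPI_Exscan semantics: rank r receives op applied over the send buffers of
+ // ranks 0..r-1 only; the receive buffer of rank 0 is left undefined.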
index = 0;
for(other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
- requests[index] =
- smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
+ requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
index++;
}
for(other = rank + 1; other < size; other++) {
- requests[index] =
- smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+ requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
index++;
}
// Wait for completion of all comms.
if(recvbuf_is_empty){
smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
recvbuf_is_empty=0;
- }else
- // #Request is below rank: it's a irecv
- smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ } else
+ // The request index is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
}
}
}else{
if(recvbuf_is_empty){
smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
recvbuf_is_empty=0;
- }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ } else
+ smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
}
}
}