/* smpi_coll.c -- various optimized routines for collectives */
-/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
namespace simgrid {
namespace smpi {
-std::map<std::string, std::vector<s_mpi_coll_description_t>> smpi_coll_descriptions(
+std::map<std::string, std::vector<s_mpi_coll_description_t>, std::less<>> smpi_coll_descriptions(
{{std::string("gather"),
{{"default", "gather default collective", (void*)gather__default},
{"ompi", "gather ompi collective", (void*)gather__ompi},
{"ompi_basic_recursivehalving", "reduce_scatter ompi_basic_recursivehalving collective",
(void*)reduce_scatter__ompi_basic_recursivehalving},
{"ompi_ring", "reduce_scatter ompi_ring collective", (void*)reduce_scatter__ompi_ring},
+ {"ompi_butterfly", "reduce_scatter ompi_butterfly collective", (void*)reduce_scatter__ompi_butterfly},
{"mpich", "reduce_scatter mpich collective", (void*)reduce_scatter__mpich},
{"mpich_pair", "reduce_scatter mpich_pair collective", (void*)reduce_scatter__mpich_pair},
{"mpich_rdb", "reduce_scatter mpich_rdb collective", (void*)reduce_scatter__mpich_rdb},
{{"default", "scatter default collective", (void*)scatter__default},
{"ompi", "scatter ompi collective", (void*)scatter__ompi},
{"ompi_basic_linear", "scatter ompi_basic_linear collective", (void*)scatter__ompi_basic_linear},
+ {"ompi_linear_nb", "scatter ompi_linear nonblocking collective", (void*)scatter__ompi_linear_nb},
{"ompi_binomial", "scatter ompi_binomial collective", (void*)scatter__ompi_binomial},
{"mpich", "scatter mpich collective", (void*)scatter__mpich},
{"mvapich2", "scatter mvapich2 collective", (void*)scatter__mvapich2},
std::vector<s_mpi_coll_description_t>* colls::get_smpi_coll_descriptions(const std::string& name)
{
auto iter = smpi_coll_descriptions.find(name);
- if (iter == smpi_coll_descriptions.end())
- xbt_die("No collective named %s. This is a bug.", name.c_str());
+ xbt_assert(iter != smpi_coll_descriptions.end(), "No collective named %s. This is a bug.", name.c_str());
return &iter->second;
}
static s_mpi_coll_description_t* find_coll_description(const std::string& collective, const std::string& algo)
{
std::vector<s_mpi_coll_description_t>* table = colls::get_smpi_coll_descriptions(collective);
- if (table->empty())
- xbt_die("No registered algorithm for collective '%s'! This is a bug.", collective.c_str());
-
- for (unsigned long i = 0; i < table->size(); i++) {
- auto desc = &table->at(i);
- if (algo == desc->name) {
- if (desc->name != "default")
- XBT_INFO("Switch to algorithm %s for collective %s", desc->name.c_str(), collective.c_str());
- return desc;
+ xbt_assert(not table->empty(), "No registered algorithm for collective '%s'! This is a bug.", collective.c_str());
+
+ for (auto& desc : *table) {
+ if (algo == desc.name) {
+ if (desc.name != "default")
+ XBT_INFO("Switch to algorithm %s for collective %s", desc.name.c_str(), collective.c_str());
+ return &desc;
}
}
{ \
auto desc = find_coll_description(_XBT_STRINGIFY(cat), name); \
cat = reinterpret_cast<ret(*) args>(desc->coll); \
- if (cat == nullptr) \
- xbt_die("Collective " _XBT_STRINGIFY(cat) " set to nullptr!"); \
+ xbt_assert(cat != nullptr, "Collective " _XBT_STRINGIFY(cat) " set to nullptr!"); \
}
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "")
COLL_APPLY(COLL_SETTER,COLL_ALLGATHER_SIG,"")
Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Send/Recv buffers to/from others
- MPI_Request* requests = new MPI_Request[size - 1];
- unsigned char** tmpbufs = new unsigned char*[rank];
+ auto* requests = new MPI_Request[size - 1];
+ auto** tmpbufs = new unsigned char*[rank];
int index = 0;
for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
break;
}
if(index < rank) {
- // #Request is below rank: it's a irecv
+ // #Request is below rank: it's an irecv
op->apply( tmpbufs[index], recvbuf, &count, datatype);
}
}
datatype->extent(&lb, &dataext);
// Send/Recv buffers to/from others
- MPI_Request* requests = new MPI_Request[size - 1];
- unsigned char** tmpbufs = new unsigned char*[rank];
+ auto* requests = new MPI_Request[size - 1];
+ auto** tmpbufs = new unsigned char*[rank];
int index = 0;
for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
recvbuf_is_empty=0;
} else
- // #Request is below rank: it's a irecv
+ // #Request is below rank: it's an irecv
op->apply( tmpbufs[index], recvbuf, &count, datatype);
}
}