CHECK_NEGATIVE(2, MPI_ERR_ARG, ndims)
for (int i = 0; i < ndims; i++)
CHECK_NEGATIVE(2, MPI_ERR_ARG, dims[i])
- simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
+ const simgrid::smpi::Topo_Cart* topo =
+ new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
if (*comm_cart == MPI_COMM_NULL) {
delete topo;
} else {
if (topo==nullptr) {
return MPI_ERR_ARG;
}
- MPIR_Cart_Topology cart = topo->sub(remain_dims, comm_new);
+ const simgrid::smpi::Topo_Cart* cart = topo->sub(remain_dims, comm_new);
if(*comm_new==MPI_COMM_NULL)
delete cart;
if(cart==nullptr)
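
Note on the constified owning pointers above: `delete` accepts a pointer-to-const, so marking `topo` and `cart` const does not prevent the cleanup paths from freeing them. A minimal sketch, with a hypothetical `Topology` type standing in for `Topo_Cart`:

    // Sketch only: delete-through-const is well-formed C++, so owning
    // pointers that are never written through can be declared const.
    struct Topology {
      int ndims;
    };

    int main()
    {
      const Topology* topo = new Topology{2};
      delete topo; // fine: delete takes the pointer regardless of constness
      return 0;
    }
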
template <typename T> int Keyval::attr_get(int keyval, void* attr_value, int* flag){
- smpi_key_elem elem = T::keyvals_.at(keyval);
+ const s_smpi_key_elem_t* elem = T::keyvals_.at(keyval);
if(elem==nullptr)
return MPI_ERR_ARG;
if(attributes()->empty()){
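
The `attr_get` hunk pairs `keyvals_.at(keyval)` with a nullptr test; `at()` throws `std::out_of_range` for an absent key, so the nullptr branch only fires for keys stored with a null element (for instance after the keyval was freed). A minimal sketch with assumed types:

    #include <map>
    #include <cstdio>

    struct KeyElem {
      int refcount;
    };

    int main()
    {
      std::map<int, const KeyElem*> keyvals;
      keyvals[7] = nullptr; // key present, element released

      const KeyElem* elem = keyvals.at(7); // would throw std::out_of_range if 7 were absent
      if (elem == nullptr)
        std::puts("keyval 7 exists but was freed");
      return 0;
    }
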
TRACE_smpi_setup_container(rank, sg_host_self());
simgrid::s4u::this_actor::on_exit([self](bool) { smpi_container(self->get_pid())->remove_from_parent(); });
#if HAVE_PAPI
- container_t container = smpi_container(rank);
+ const simgrid::instr::Container* container = smpi_container(rank);
papi_counter_t counters = smpi_process()->papi_counters();
for (auto const& it : counters) {
#if HAVE_PAPI
if (not smpi_cfg_papi_events_file().empty() && TRACE_smpi_is_enabled()) {
- container_t container =
+ const simgrid::instr::Container* container =
simgrid::instr::Container::by_name(std::string("rank-") + std::to_string(simgrid::s4u::this_actor::get_pid()));
- papi_counter_t& counter_data = smpi_process()->papi_counters();
+ const papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
simgrid::instr::VariableType* variable = static_cast<simgrid::instr::VariableType*>(container->type_->by_name(pair.first));
auto sample = samples.find(loc);
if (sample == samples.end())
xbt_die("Y U NO use SMPI_SAMPLE_* macros? Stop messing directly with smpi_sample_* functions!");
- LocalData& data = sample->second;
+ const LocalData& data = sample->second;
if (data.benching) {
// we need to run a new bench
xbt_die("Y U NO use SMPI_SAMPLE_* macros? Stop messing directly with smpi_sample_* functions!");
if (smpi_process()->sampling()){//end of loop, but still sampling needed
- LocalData& data = sample->second;
+ const LocalData& data = sample->second;
smpi_process()->set_sampling(0);
smpi_execute(data.mean * iter_count);
smpi_bench_begin();
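
The sampling hunks read a `LocalData` entry, check whether it is still benchmarking, and otherwise inject `mean * iter_count` of simulated time. A sketch of the underlying idea (field names are illustrative, not SMPI's exact layout): keep running sums so the mean and its standard error can be tested against a relative threshold.

    #include <cmath>
    #include <cstdio>

    struct LocalData {
      double sum = 0.0;  // sum of measured durations
      double sum2 = 0.0; // sum of squared durations
      int count = 0;
      bool benching = true;

      double mean() const { return sum / count; }
      double stderror() const { return std::sqrt((sum2 / count - mean() * mean()) / count); }
    };

    int main()
    {
      LocalData data;
      for (double measured : {1.02, 0.98, 1.01, 0.99}) { // durations of real runs
        data.sum += measured;
        data.sum2 += measured * measured;
        data.count++;
        // keep benchmarking until the relative standard error drops below 5%
        data.benching = data.count < 2 || data.stderror() > 0.05 * data.mean();
      }
      std::printf("benching=%d mean=%g\n", data.benching, data.mean());
      return 0;
    }
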
void smpi_deployment_register_process(const std::string& instance_id, int rank, simgrid::s4u::Actor* actor)
{
- Instance& instance = smpi_instances.at(instance_id);
+ const Instance& instance = smpi_instances.at(instance_id);
instance.comm_world_->group()->set_mapping(actor, rank);
}
if (got == -1) {
xbt_assert(errno == EINTR, "Cannot read from %s", src.c_str());
} else {
- char* p = buf;
+ const char* p = buf;
int todo = got;
while (int done = write(fdout, p, todo)) {
if (done == -1) {
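
For context, the hunk above sits in a read/write copy loop; `p` only ever advances over the buffer, hence the constified cursor. A self-contained sketch of the same pattern (POSIX, with illustrative names), handling EINTR and short writes:

    #include <unistd.h>
    #include <cerrno>

    bool copy_fd(int fdin, int fdout)
    {
      char buf[4096];
      for (;;) {
        ssize_t got = read(fdin, buf, sizeof buf);
        if (got == 0)
          return true; // EOF
        if (got == -1) {
          if (errno == EINTR)
            continue; // interrupted: retry the read
          return false;
        }
        const char* p = buf; // advancing read-only cursor, as in the patch
        ssize_t todo = got;
        while (todo > 0) {
          ssize_t done = write(fdout, p, todo);
          if (done == -1) {
            if (errno == EINTR)
              continue;
            return false;
          }
          p += done; // short write: advance and retry
          todo -= done;
        }
      }
    }
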
static void smpi_get_executable_global_size()
{
char buffer[PATH_MAX];
- char* full_name = realpath(simgrid::xbt::binary_name.c_str(), buffer);
+ const char* full_name = realpath(simgrid::xbt::binary_name.c_str(), buffer);
xbt_assert(full_name != nullptr, "Could not resolve real path of binary file '%s'",
simgrid::xbt::binary_name.c_str());
XBT_DEBUG("Switching data frame to the one of process %ld", actor->get_pid());
simgrid::smpi::ActorExt* process = smpi_process_remote(actor);
int current = process->privatized_region()->file_descriptor;
- void* tmp = mmap(TOPAGE(smpi_data_exe_start), smpi_data_exe_size, PROT_RW, MAP_FIXED | MAP_SHARED, current, 0);
+ const void* tmp = mmap(TOPAGE(smpi_data_exe_start), smpi_data_exe_size, PROT_RW, MAP_FIXED | MAP_SHARED, current, 0);
if (tmp != TOPAGE(smpi_data_exe_start))
xbt_die("Couldn't map the new region (errno %d): %s", errno, strerror(errno));
smpi_loaded_page = actor->get_pid();
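
The privatization hunk relies on a property of MAP_FIXED: on success `mmap` returns exactly the requested address, so comparing the (constified) result against `TOPAGE(smpi_data_exe_start)` doubles as the error check. A minimal sketch with anonymous mappings standing in for the per-process data segment file:

    #include <sys/mman.h>
    #include <cstdio>

    int main()
    {
      // Reserve a page, then map over it in place with MAP_FIXED.
      void* base = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      const void* res = mmap(base, 4096, PROT_READ | PROT_WRITE,
                             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res != base) // MAP_FIXED either succeeds at `base` or returns MAP_FAILED
        std::perror("mmap(MAP_FIXED)");
      munmap(base, 4096);
      return 0;
    }
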
void get_requests(std::vector<MPI_Request>& vec)
{
- for (auto& pair : store) {
+ for (auto const& pair : store) {
auto& req = pair.second;
auto my_proc_id = simgrid::s4u::this_actor::get_pid();
if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
std::vector<std::pair</*sender*/int,/*recv*/int>> sender_receiver;
std::vector<MPI_Request> reqs;
req_storage.get_requests(reqs);
- for (const auto& req : reqs) {
+ for (auto const& req : reqs) {
if (req && (req->flags() & MPI_REQ_RECV)) {
sender_receiver.push_back({req->src(), req->dst()});
}
Request::waitall(count_requests, &(reqs.data())[0], MPI_STATUSES_IGNORE);
req_storage.get_store().clear();
- for (auto& pair : sender_receiver) {
+ for (auto const& pair : sender_receiver) {
TRACE_smpi_recv(pair.first, pair.second, 0);
}
TRACE_smpi_comm_out(my_proc_id);
smpi_shared_malloc_bogusfile = mkstemp(name);
XBT_DEBUG("bogusfile : %s\n", name);
unlink(name);
- char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
+ const char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
if(err<0)
xbt_die("Could not write bogus file for shared malloc");
for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
void* pos = (void*)((unsigned long)mem + offset);
- void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag,
- huge_fd, 0);
+ const void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag, huge_fd, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
"size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
"You can also try using the sysctl vm.max_map_count. "
if(low_page_start_offset < low_page_stop_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
void* pos = (void*)((unsigned long)mem + low_page_start_offset);
- void* res = mmap(pos, low_page_stop_offset-low_page_start_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
- smpi_shared_malloc_bogusfile, 0);
+ const void* res = mmap(pos, low_page_stop_offset - low_page_start_offset, PROT_READ | PROT_WRITE,
+ mmap_base_flag, // not a full huge page
+ smpi_shared_malloc_bogusfile, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
"size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
"You can also try using the sysctl vm.max_map_count",
size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE);
if(high_page_stop_offset > stop_block_offset) {
void* pos = (void*)((unsigned long)mem + stop_block_offset);
- void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
- smpi_shared_malloc_bogusfile, 0);
+ const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
+ mmap_base_flag, // not a full huge page
+ smpi_shared_malloc_bogusfile, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
"size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
"You can also try using the sysctl vm.max_map_count",
if (request->action_ != nullptr){
kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
- MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data_);
+ const Request* req = static_cast<MPI_Request>(sync_comm->src_data_);
*flag = 1;
if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
status->MPI_SOURCE = comm->group()->rank(req->src_);
/* Get # of free-to-be-assigned processes and # of free dimensions */
int freeprocs = nnodes;
int freedims = 0;
- int *p = dims;
+ const int* p = dims;
for (int i = 0; i < ndims; ++i) {
if (*p == 0) {
++freedims;
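
In the `Dims_create` hunk the array is only read, hence `const int* p`. Semantically, a zero entry is a dimension left free for the algorithm to fill in, while non-zero entries divide the node count. A compilable sketch of the scan (the `else` branch is inferred from MPI_Dims_create semantics):

    #include <cstdio>

    int main()
    {
      int nnodes  = 12;
      int dims[3] = {2, 0, 0}; // one fixed dimension, two free ones
      int freeprocs = nnodes;
      int freedims  = 0;
      for (const int* p = dims; p != dims + 3; ++p) { // read-only walk, as in the patch
        if (*p == 0)
          ++freedims;
        else
          freeprocs /= *p;
      }
      std::printf("%d processes to spread over %d free dimensions\n", freeprocs, freedims);
      return 0;
    }
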
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
+ const Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
if(target_count*target_datatype->get_extent()>send_win->size_)
return MPI_ERR_ARG;
- void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) + target_disp * send_win->disp_unit_);
+ const void* send_addr = static_cast<void*>(static_cast<char*>(send_win->base_) +
+                                             target_disp * send_win->disp_unit_);
XBT_DEBUG("Entering MPI_Get from %d", target_rank);
if(target_rank != comm_->rank()){
{
XBT_DEBUG("Entering MPI_Win_Accumulate");
//get receiver pointer
- MPI_Win recv_win = connected_wins_[target_rank];
+ const Win* recv_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
{
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
void *result_addr, MPI_Datatype datatype, int target_rank,
MPI_Aint target_disp){
//get sender pointer
- MPI_Win send_win = connected_wins_[target_rank];
+ const Win* send_win = connected_wins_[target_rank];
if(opened_==0){//check that post/start has been done
// no fence or start .. lock ok ?
int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
{
- MPI_Win target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
+ const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
for (int i = 0; not target_win && i < comm_->size(); i++) {
if (connected_wins_[i]->size_ > 0)
target_win = connected_wins_[i];
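
All of the `Win` hunks follow the same shape: fetch the peer's window as a read-only `const Win*`, bounds-check, and compute the target address as base plus displacement times displacement unit. A sketch of that arithmetic with assumed fields:

    #include <cstddef>
    #include <cstdio>

    struct Win {
      void* base_;
      std::ptrdiff_t disp_unit_;
    };

    const void* target_address(const Win* w, std::ptrdiff_t target_disp)
    {
      return static_cast<const char*>(w->base_) + target_disp * w->disp_unit_;
    }

    int main()
    {
      char buffer[64];
      Win win{buffer, 8}; // window over buffer, 8-byte displacement unit
      std::printf("disp 2 -> offset %td\n",
                  static_cast<const char*>(target_address(&win, 2)) - buffer);
      return 0;
    }
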
std::vector<simgrid::s4u::ActorPtr> all_actors =
engine->get_filtered_actors([](simgrid::s4u::ActorPtr actor) { return not actor->is_daemon(); });
- for (auto& actor : all_actors) {
+ for (auto const& actor : all_actors) {
new_mapping.assign(actor, actor->get_host());
}
// Sort the actors, from highest to lowest load; we then just iterate over these actors
heap_handle update_handle = usable_hosts.push(host); // Required to update elements in the heap
additional_load[host] = {update_handle, 0}; // Save the handle for later
const double total_flops_computed = sg_host_get_computed_flops(host);
- for (auto& actor : actors) {
+ for (auto const& actor : actors) {
additional_load[host].load += actor_computation[actor->get_pid()] / total_flops_computed; // Normalize load - this allows comparison
// even between hosts with different frequencies
XBT_DEBUG("Actor %li -> %f", actor->get_pid(), actor_computation[actor->get_pid()]);
}
// Implementation of the Greedy algorithm
- for (auto& actor : all_actors) {
+ for (auto const& actor : all_actors) {
simgrid::s4u::Host* target_host = usable_hosts.top(); // This is the host with the lowest load
simgrid::s4u::Host* cur_mapped_host = new_mapping.get_host(actor);
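
The load-balancing hunks use a mutable heap whose push() returns a handle, so a host's load can be updated in place after each assignment. A minimal sketch of that idiom, assuming boost::heap (which the `heap_handle` in the surrounding code suggests):

    #include <boost/heap/fibonacci_heap.hpp>
    #include <cstdio>

    struct HostLoad {
      const char* name;
      double load;
      // invert the comparison so top() is the *least* loaded host
      bool operator<(const HostLoad& other) const { return load > other.load; }
    };

    int main()
    {
      boost::heap::fibonacci_heap<HostLoad> usable_hosts;
      auto update_handle = usable_hosts.push({"host0", 0.0}); // handle for later updates
      usable_hosts.push({"host1", 0.5});

      // Greedy step: give work to the least-loaded host, then fix up the heap.
      (*update_handle).load += 1.0;
      usable_hosts.update(update_handle);

      std::printf("least loaded is now %s\n", usable_hosts.top().name);
      return 0;
    }
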