void mpi_win_set_attr_(int* win, int* type_keyval, MPI_Aint* att, int* ierr)
{
- MPI_Aint* val = (MPI_Aint*)xbt_malloc(sizeof(MPI_Aint));
- *val = *att;
- *ierr = MPI_Win_set_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, val);
+ auto* val = static_cast<MPI_Aint*>(xbt_malloc(sizeof(MPI_Aint)));
+ *val = *att;
+ *ierr = MPI_Win_set_attr(simgrid::smpi::Win::f2c(*win), *type_keyval, val);
}
void mpi_win_delete_attr_(int* win, int* comm_keyval, int* ierr)
}
void mpi_attr_put_ (int* comm, int* keyval, int* attr_value, int* ierr) {
- int* val = (int*)xbt_malloc(sizeof(int));
- *val=*attr_value;
- *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, val);
+ auto* val = static_cast<int*>(xbt_malloc(sizeof(int)));
+ *val = *attr_value;
+ *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, val);
}
void mpi_keyval_create_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr) {
void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* old_sendtypes, void *recvbuf, int *recvcnts,
int *rdispls, int* old_recvtypes, int* comm, int* ierr){
int size = simgrid::smpi::Comm::f2c(*comm)->size();
- MPI_Datatype* sendtypes = new MPI_Datatype[size];
- MPI_Datatype* recvtypes = new MPI_Datatype[size];
+ auto* sendtypes = new MPI_Datatype[size];
+ auto* recvtypes = new MPI_Datatype[size];
for(int i=0; i< size; i++){
if(FORT_IN_PLACE(sendbuf)!=MPI_IN_PLACE)
sendtypes[i] = simgrid::smpi::Datatype::f2c(old_sendtypes[i]);
int *rdispls, int* old_recvtypes, int* comm, int* request, int* ierr){
MPI_Request req;
int size = simgrid::smpi::Comm::f2c(*comm)->size();
- MPI_Datatype* sendtypes = new MPI_Datatype[size];
- MPI_Datatype* recvtypes = new MPI_Datatype[size];
+ auto* sendtypes = new MPI_Datatype[size];
+ auto* recvtypes = new MPI_Datatype[size];
for(int i=0; i< size; i++){
if(FORT_IN_PLACE(sendbuf)!=MPI_IN_PLACE)
sendtypes[i] = simgrid::smpi::Datatype::f2c(old_sendtypes[i]);
}
void mpi_comm_set_attr_ (int* comm, int* comm_keyval, int *attribute_val, int* ierr){
- int* val = (int*)xbt_malloc(sizeof(int));
- *val=*attribute_val;
- *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, val);
+ auto* val = static_cast<int*>(xbt_malloc(sizeof(int)));
+ *val = *attribute_val;
+ *ierr = MPI_Comm_set_attr(simgrid::smpi::Comm::f2c(*comm), *comm_keyval, val);
}
void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){
}
void mpi_type_set_attr_ (int* type, int* type_keyval, int *attribute_val, int* ierr){
- int* val = (int*)xbt_malloc(sizeof(int));
- *val=*attribute_val;
- *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, val);
+ auto* val = static_cast<int*>(xbt_malloc(sizeof(int)));
+ *val = *attribute_val;
+ *ierr = MPI_Type_set_attr(simgrid::smpi::Datatype::f2c(*type), *type_keyval, val);
}
void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){
void mpi_type_hindexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) {
MPI_Datatype tmp;
- MPI_Aint* indices_aint=new MPI_Aint[*count];
+ auto* indices_aint = new MPI_Aint[*count];
for(int i=0; i<*count; i++)
indices_aint[i]=indices[i];
*ierr = MPI_Type_hindexed(*count, blocklens, indices_aint, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
void mpi_type_struct_ (int* count, int* blocklens, int* indices, int* old_types, int* newtype, int* ierr) {
MPI_Datatype tmp;
- MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
- MPI_Aint* indices_aint=new MPI_Aint[*count];
+ auto* types = static_cast<MPI_Datatype*>(xbt_malloc(*count * sizeof(MPI_Datatype)));
+ auto* indices_aint = new MPI_Aint[*count];
for (int i = 0; i < *count; i++) {
indices_aint[i]=indices[i];
types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* old_types, int* newtype, int* ierr){
MPI_Datatype tmp;
- MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
+ auto* types = static_cast<MPI_Datatype*>(xbt_malloc(*count * sizeof(MPI_Datatype)));
for (int i = 0; i < *count; i++) {
types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
}
int rank = simgrid::s4u::this_actor::get_pid();
int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size();
- std::vector<int>* trace_recvcounts = new std::vector<int>();
+ auto* trace_recvcounts = new std::vector<int>();
if (comm->rank() == root) {
for (int i = 0; i < comm->size(); i++) // copy data to avoid bad free
trace_recvcounts->push_back(recvcounts[i] * dt_size_recv);
int rank = simgrid::s4u::this_actor::get_pid();
int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size();
- std::vector<int>* trace_recvcounts = new std::vector<int>();
+ auto* trace_recvcounts = new std::vector<int>();
for (int i = 0; i < comm->size(); i++) { // copy data to avoid bad free
trace_recvcounts->push_back(recvcounts[i] * dt_size_recv);
}
int rank = simgrid::s4u::this_actor::get_pid();
int dt_size_send = sendtype->is_replayable() ? 1 : sendtype->size();
- std::vector<int>* trace_sendcounts = new std::vector<int>();
+ auto* trace_sendcounts = new std::vector<int>();
if (comm->rank() == root) {
for (int i = 0; i < comm->size(); i++) { // copy data to avoid bad free
trace_sendcounts->push_back(sendcounts[i] * dt_size_send);
smpi_bench_end();
int rank = simgrid::s4u::this_actor::get_pid();
- std::vector<int>* trace_recvcounts = new std::vector<int>();
+ auto* trace_recvcounts = new std::vector<int>();
int dt_send_size = datatype->is_replayable() ? 1 : datatype->size();
int totalcount = 0;
int rank = simgrid::s4u::this_actor::get_pid();
int dt_send_size = datatype->is_replayable() ? 1 : datatype->size();
- std::vector<int>* trace_recvcounts = new std::vector<int>(recvcount * dt_send_size); // copy data to avoid bad free
+ auto* trace_recvcounts = new std::vector<int>(recvcount * dt_send_size); // copy data to avoid bad free
std::unique_ptr<unsigned char[]> tmp_sendbuf;
const void* real_sendbuf = smpi_get_in_place_buf(sendbuf, recvbuf, tmp_sendbuf, recvcount * count, datatype);
new simgrid::instr::VarCollTIData(request == MPI_REQUEST_IGNORED ? "reducescatter" : "ireducescatter", -1, 0,
nullptr, -1, trace_recvcounts, simgrid::smpi::Datatype::encode(datatype), ""));
- int* recvcounts = new int[count];
+ auto* recvcounts = new int[count];
for (int i = 0; i < count; i++)
recvcounts[i] = recvcount;
if (request == MPI_REQUEST_IGNORED)
smpi_bench_end();
int send_size = 0;
int recv_size = 0;
- std::vector<int>* trace_sendcounts = new std::vector<int>();
- std::vector<int>* trace_recvcounts = new std::vector<int>();
+ auto* trace_sendcounts = new std::vector<int>();
+ auto* trace_recvcounts = new std::vector<int>();
int dt_size_recv = recvtype->size();
const int* real_sendcounts = sendcounts;
int send_size = 0;
int recv_size = 0;
- std::vector<int>* trace_sendcounts = new std::vector<int>();
- std::vector<int>* trace_recvcounts = new std::vector<int>();
+ auto* trace_sendcounts = new std::vector<int>();
+ auto* trace_recvcounts = new std::vector<int>();
const int* real_sendcounts = sendcounts;
const int* real_senddispls = senddispls;
int src_traced = getPid(comm, src);
// FIXME: Hack the way to trace this one
- std::vector<int>* dst_hack = new std::vector<int>();
- std::vector<int>* src_hack = new std::vector<int>();
+ auto* dst_hack = new std::vector<int>();
+ auto* src_hack = new std::vector<int>();
dst_hack->push_back(dst_traced);
src_hack->push_back(src_traced);
TRACE_smpi_comm_in(my_proc_id, __func__,
{
CHECK_COUNT(1, count)
CHECK_MPI_NULL(4, MPI_DATATYPE_NULL, MPI_ERR_TYPE, old_type)
- int* blocklens=static_cast<int*>(xbt_malloc(blocklength*count*sizeof(int)));
+ auto* blocklens = static_cast<int*>(xbt_malloc(blocklength * count * sizeof(int)));
for (int i = 0; i < count; i++)
blocklens[i]=blocklength;
int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
MPI_Datatype* new_type) {
CHECK_COUNT(1, count)
CHECK_MPI_NULL(4, MPI_DATATYPE_NULL, MPI_ERR_TYPE, old_type)
- int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int));
+ auto* blocklens = static_cast<int*>(xbt_malloc(blocklength * count * sizeof(int)));
for (int i = 0; i < count; i++)
blocklens[i] = blocklength;
int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
MPI_Offset max_offset =
min_offset +
count * datatype->get_extent(); // cheating: since we don't care about the exact data location, we can skip the extent
- MPI_Offset* min_offsets = new MPI_Offset[size];
- MPI_Offset* max_offsets = new MPI_Offset[size];
+ auto* min_offsets = new MPI_Offset[size];
+ auto* max_offsets = new MPI_Offset[size];
simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
MPI_Offset min = min_offset;
MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
MPI_Offset my_chunk_end = ((max - min + 1) / size * (rank + 1));
XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
- int* send_sizes = new int[size];
- int* recv_sizes = new int[size];
- int* send_disps = new int[size];
- int* recv_disps = new int[size];
+ auto* send_sizes = new int[size];
+ auto* recv_sizes = new int[size];
+ auto* send_disps = new int[size];
+ auto* recv_disps = new int[size];
int total_sent = 0;
for (int i = 0; i < size; i++) {
send_sizes[i] = 0;
template <typename T>
int Keyval::keyval_create(const smpi_copy_fn& copy_fn, const smpi_delete_fn& delete_fn, int* keyval, void* extra_state)
{
- smpi_key_elem value = new s_smpi_key_elem_t;
+ auto* value = new s_smpi_key_elem_t;
value->copy_fn=copy_fn;
value->delete_fn=delete_fn;
MPI_Comm ActorExt::comm_self()
{
if (comm_self_ == MPI_COMM_NULL) {
- MPI_Group group = new Group(1);
- comm_self_ = new Comm(group, nullptr);
+ auto* group = new Group(1);
+ comm_self_ = new Comm(group, nullptr);
group->set_mapping(actor_, 0);
}
return comm_self_;
if (not smpi_cfg_papi_events_file().empty()) {
papi_counter_t& counter_data = smpi_process()->papi_counters();
int event_set = smpi_process()->papi_event_set();
- std::vector<long long> event_values = std::vector<long long>(counter_data.size());
+ std::vector<long long> event_values(counter_data.size());
if (PAPI_stop(event_set, &event_values[0]) != PAPI_OK) { // Error
XBT_CRITICAL("Could not stop PAPI counters.\n");
const papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
- simgrid::instr::VariableType* variable = static_cast<simgrid::instr::VariableType*>(container->type_->by_name(pair.first));
+ auto* variable = static_cast<simgrid::instr::VariableType*>(container->type_->by_name(pair.first));
variable->set_event(SIMIX_get_clock(), pair.second);
}
}
public:
Instance(int max_no_processes, MPI_Comm comm) : size_(max_no_processes), comm_world_(comm)
{
- MPI_Group group = new simgrid::smpi::Group(size_);
- comm_world_ = new simgrid::smpi::Comm(group, nullptr, false, -1);
+ auto* group = new simgrid::smpi::Group(size_);
+ comm_world_ = new simgrid::smpi::Comm(group, nullptr, false, -1);
// FIXME : using MPI_Attr_put with MPI_UNIVERSE_SIZE is forbidden and we make it a no-op (which triggers a warning
// as MPI_ERR_ARG is returned). Directly calling Comm::attr_put breaks for now, as MPI_UNIVERSE_SIZE is < 0
// instance.comm_world->attr_put<simgrid::smpi::Comm>(MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
// first. Hence, we start at ++(events.begin())!
for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); ++events_it) {
int event_code = PAPI_NULL;
- char* event_name = const_cast<char*>((*events_it).c_str());
+ auto* event_name = const_cast<char*>((*events_it).c_str());
if (PAPI_event_name_to_code(event_name, &event_code) != PAPI_OK) {
XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);
continue;
static int smpi_run_entry_point(const F& entry_point, const std::string& executable_path, std::vector<std::string> args)
{
// copy C strings, we need them writable
- std::vector<char*>* args4argv = new std::vector<char*>(args.size());
+ auto* args4argv = new std::vector<char*>(args.size());
std::transform(begin(args), end(args), begin(*args4argv), [](const std::string& s) { return xbt_strdup(s.c_str()); });
// set argv[0] to executable_path
// TODO, remove the number of functions involved here
static smpi_entry_point_type smpi_resolve_function(void* handle)
{
- smpi_fortran_entry_point_type entry_point_fortran = (smpi_fortran_entry_point_type)dlsym(handle, "user_main_");
+ auto* entry_point_fortran = reinterpret_cast<smpi_fortran_entry_point_type>(dlsym(handle, "user_main_"));
if (entry_point_fortran != nullptr) {
return [entry_point_fortran](int, char**) {
entry_point_fortran();
};
}
- smpi_c_entry_point_type entry_point = (smpi_c_entry_point_type)dlsym(handle, "main");
+ auto* entry_point = reinterpret_cast<smpi_c_entry_point_type>(dlsym(handle, "main"));
if (entry_point != nullptr) {
return entry_point;
}
#endif
// If this point is reached, sendfile() is actually not available. Copy the file by hand.
const int bufsize = 1024 * 1024 * 4;
- char* buf = new char[bufsize];
+ auto* buf = new char[bufsize];
while (int got = read(fdin, buf, bufsize)) {
if (got == -1) {
xbt_assert(errno == EINTR, "Cannot read from %s", src.c_str());
#if not defined(__APPLE__) && not defined(__HAIKU__)
static int visit_libs(struct dl_phdr_info* info, size_t, void* data)
{
- char* libname = (char*)(data);
+ auto* libname = static_cast<char*>(data);
const char *path = info->dlpi_name;
if(strstr(path, libname)){
strncpy(libname, path, 512);
unsigned int count_requests = storage[simgrid::s4u::this_actor::get_pid()].size();
XBT_DEBUG("There are %ud elements in reqq[*]", count_requests);
if (count_requests > 0) {
- MPI_Request* requests= new MPI_Request[count_requests];
+ auto* requests = new MPI_Request[count_requests];
unsigned int i=0;
for (auto const& pair : storage[simgrid::s4u::this_actor::get_pid()].get_store()) {
size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
- void* pos = (void*)((unsigned long)mem + offset);
+ void* pos = static_cast<char*>(mem) + offset;
const void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag, huge_fd, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
"size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(low_page_start_offset < low_page_stop_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
- void* pos = (void*)((unsigned long)mem + low_page_start_offset);
+ void* pos = static_cast<char*>(mem) + low_page_start_offset;
const void* res = mmap(pos, low_page_stop_offset - low_page_start_offset, PROT_READ | PROT_WRITE,
mmap_base_flag, // not a full huge page
smpi_shared_malloc_bogusfile, 0);
XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(high_page_stop_offset > stop_block_offset) {
- void* pos = (void*)((unsigned long)mem + stop_block_offset);
+ void* pos = static_cast<char*>(mem) + stop_block_offset;
const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
mmap_base_flag, // not a full huge page
smpi_shared_malloc_bogusfile, 0);
shared_metadata_t newmeta;
//register metadata for memcpy avoidance
- shared_data_key_type* data = new shared_data_key_type;
+ auto* data = new shared_data_key_type;
data->second.fd = -1;
data->second.count = 1;
newmeta.size = size;
// we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(s4u::Actor::self());
}
- MPI_Group cp = new Group(this->group());
+ auto* cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
int ret = MPI_SUCCESS;
}
}
XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
- MPI_Group group_intra = new Group(intra_comm_size);
+ auto* group_intra = new Group(intra_comm_size);
int i = 0;
for (auto& actor : actor_list) {
if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) {
// identify neighbors in comm
MPI_Comm comm_intra = find_intra_comm(&leader);
-
- int* leaders_map = new int[comm_size];
- int* leader_list = new int[comm_size];
+ auto* leaders_map = new int[comm_size];
+ auto* leader_list = new int[comm_size];
std::fill_n(leaders_map, comm_size, 0);
std::fill_n(leader_list, comm_size, -1);
xbt_assert(leader_group_size > 0);
std::sort(leader_list, leader_list + leader_group_size);
- MPI_Group leaders_group = new Group(leader_group_size);
+ auto* leaders_group = new Group(leader_group_size);
MPI_Comm leader_comm = MPI_COMM_NULL;
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//Default serialization method : memcpy.
void Datatype::serialize(const void* noncontiguous_buf, void* contiguous_buf, int count)
{
- char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
- const char* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf)+lb_;
+ auto* contiguous_buf_char = static_cast<char*>(contiguous_buf);
+ const auto* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf) + lb_;
memcpy(contiguous_buf_char, noncontiguous_buf_char, count*size_);
}
void Datatype::unserialize(const void* contiguous_buf, void *noncontiguous_buf, int count, MPI_Op op){
- const char* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
- char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+lb_;
+ const auto* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
+ auto* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf) + lb_;
int n=count;
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &n, this);
void Type_Contiguous::serialize(const void* noncontiguous_buf, void* contiguous_buf, int count)
{
- char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
- const char* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf)+lb();
+ auto* contiguous_buf_char = static_cast<char*>(contiguous_buf);
+ const auto* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf) + lb();
memcpy(contiguous_buf_char, noncontiguous_buf_char, count * block_count_ * old_type_->size());
}
void Type_Contiguous::unserialize(const void* contiguous_buf, void* noncontiguous_buf, int count, MPI_Op op)
{
- const char* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
- char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+lb();
+ const auto* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
+ auto* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf) + lb();
int n= count*block_count_;
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &n, old_type_);
void Type_Hvector::serialize(const void* noncontiguous_buf, void *contiguous_buf,
int count){
- char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
- const char* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf);
+ auto* contiguous_buf_char = static_cast<char*>(contiguous_buf);
+ const auto* noncontiguous_buf_char = static_cast<const char*>(noncontiguous_buf);
for (int i = 0; i < block_count_ * count; i++) {
if (not(old_type_->flags() & DT_FLAG_DERIVED))
void Type_Hvector::unserialize(const void* contiguous_buf, void *noncontiguous_buf,
int count, MPI_Op op){
- const char* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
- char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
+ const auto* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
+ auto* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
for (int i = 0; i < block_count_ * count; i++) {
if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
, block_indices_(new MPI_Aint[count])
, old_type_(old_type)
{
- int* ints = new int[count+1];
+ auto* ints = new int[count + 1];
ints[0]=count;
for(int i=1;i<=count;i++)
ints[i]=block_lengths[i-1];
void Type_Hindexed::serialize(const void* noncontiguous_buf, void *contiguous_buf,
int count){
- char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
- const char* noncontiguous_buf_iter = static_cast<const char*>(noncontiguous_buf);
- const char* noncontiguous_buf_char = noncontiguous_buf_iter + block_indices_[0];
+ auto* contiguous_buf_char = static_cast<char*>(contiguous_buf);
+ const auto* noncontiguous_buf_iter = static_cast<const char*>(noncontiguous_buf);
+ const auto* noncontiguous_buf_char = noncontiguous_buf_iter + block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
if (not(old_type_->flags() & DT_FLAG_DERIVED))
void Type_Hindexed::unserialize(const void* contiguous_buf, void *noncontiguous_buf,
int count, MPI_Op op){
- const char* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
- char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
+ const auto* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
+ auto* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf) + block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
: Type_Hindexed(size, lb, ub, flags, count, block_lengths, block_indices, old_type, old_type->get_extent())
{
delete contents_;
- int* ints = new int[2*count+1];
+ auto* ints = new int[2 * count + 1];
ints[0]=count;
for(int i=1;i<=count;i++)
ints[i]=block_lengths[i-1];
, block_indices_(new MPI_Aint[count])
, old_types_(new MPI_Datatype[count])
{
- int* ints = new int[count+1];
+ auto* ints = new int[count + 1];
ints[0]=count;
for(int i=1;i<=count;i++)
ints[i]=block_lengths[i-1];
void Type_Struct::serialize(const void* noncontiguous_buf, void *contiguous_buf,
int count){
- char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
- const char* noncontiguous_buf_iter = static_cast<const char*>(noncontiguous_buf);
- const char* noncontiguous_buf_char = noncontiguous_buf_iter + block_indices_[0];
+ auto* contiguous_buf_char = static_cast<char*>(contiguous_buf);
+ const auto* noncontiguous_buf_iter = static_cast<const char*>(noncontiguous_buf);
+ const auto* noncontiguous_buf_char = noncontiguous_buf_iter + block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
if (not(old_types_[i]->flags() & DT_FLAG_DERIVED))
void Type_Struct::unserialize(const void* contiguous_buf, void *noncontiguous_buf,
int count, MPI_Op op){
- const char* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
- char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
+ const auto* contiguous_buf_char = static_cast<const char*>(contiguous_buf);
+ auto* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf) + block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
if (not(old_types_[i]->flags() & DT_FLAG_DERIVED)) {
int File::del(const char* filename, const Info*)
{
//get the file with MPI_MODE_DELETE_ON_CLOSE and then close it
- File* f = new File(MPI_COMM_SELF,filename,MPI_MODE_DELETE_ON_CLOSE|MPI_MODE_RDWR, nullptr);
+ auto* f = new File(MPI_COMM_SELF, filename, MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_RDWR, nullptr);
close(&f);
return MPI_SUCCESS;
}
int oldsize = size_;
int newsize = oldsize - n;
*newgroup = new Group(newsize);
- int* to_exclude = new int[size_];
+ auto* to_exclude = new int[size_];
for (int i = 0; i < oldsize; i++)
to_exclude[i]=0;
for (int i = 0; i < n; i++)
bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
+ auto ref = static_cast<MPI_Request>(a);
+ auto req = static_cast<MPI_Request>(b);
return match_common(req, req, ref);
}
bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
+ auto ref = static_cast<MPI_Request>(a);
+ auto req = static_cast<MPI_Request>(b);
return match_common(req, ref, req);
}
static int nsleeps = 1;
double speed = s4u::this_actor::get_host()->get_speed();
double maxrate = smpi_cfg_iprobe_cpu_usage();
- MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
- source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
- simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
+ auto request = new Request(nullptr, 0, MPI_CHAR,
+ source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
+ simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
if (smpi_iprobe_sleep > 0) {
/** Compute the number of flops we will sleep **/
s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
} else {
if(comm_cart != nullptr){
if (rank == 0) {
- MPI_Group group = new Group(MPI_COMM_SELF->group());
- *comm_cart = new Comm(group, std::shared_ptr<Topo>(this));
+ auto* group = new Group(MPI_COMM_SELF->group());
+ *comm_cart = new Comm(group, std::shared_ptr<Topo>(this));
} else {
*comm_cart = MPI_COMM_NULL;
}
res = new Topo_Cart(getComm(), newNDims, newDims, newPeriodic, 0, newcomm);
} else {
*newcomm = getComm()->split(color, getComm()->rank());
- res = new Topo_Cart(getComm(), newNDims, newDims, newPeriodic, 0, nullptr);
- std::shared_ptr<Topo> topo=std::shared_ptr<Topo>(res);
+ auto topo = std::make_shared<Topo_Cart>(getComm(), newNDims, newDims, newPeriodic, 0, nullptr);
+ res = topo.get();
res->setComm(*newcomm);
(*newcomm)->set_topo(topo);
}
return MPI_ERR_DIMS;
}
- int* position = new int[ndims_];
+ auto* position = new int[ndims_];
this->coords(getComm()->rank(), ndims_, position);
position[direction] += disp;
}
/* Allocate and initialize the bins */
- int *bins = new int[ndim];
+ auto* bins = new int[ndim];
*pdims = bins;
int *p = bins;
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
if(target_count*target_datatype->get_extent()>recv_win->size_)
return MPI_ERR_ARG;
- void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
+ void* recv_addr = static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_;
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
// As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other
// SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
if(smpi_process()->replaying()) {//When replaying, we register an event.
smpi_container(rank)->get_state("MIGRATE_STATE")->add_entity_value(operation);
- simgrid::instr::EventType* type =
- static_cast<simgrid::instr::EventType*>(smpi_container(rank)->type_->by_name(operation));
+ auto* type = static_cast<simgrid::instr::EventType*>(smpi_container(rank)->type_->by_name(operation));
new simgrid::instr::NewEvent(smpi_process()->simulated_elapsed(), smpi_container(rank), type,
type->get_entity_value(operation));
} else {