/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
/* Copyright (c) 2009-2023. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
// Instantiate the predefined datatype named COUNT (numeric id 59) backed by C 'long long';
// DT_FLAG_MULTILANG presumably marks it as usable from both C and Fortran — TODO confirm.
CREATE_MPI_DATATYPE(COUNT, 59, long long, DT_FLAG_MULTILANG)
// Public handle for the SMPI pointer datatype: the address of the smpi_MPI_PTR object
// defined by the macro machinery above/elsewhere in this file.
MPI_Datatype MPI_PTR = &smpi_MPI_PTR;
-
-namespace simgrid{
-namespace smpi{
+namespace simgrid::smpi {
std::unordered_map<int, smpi_key_elem> Datatype::keyvals_; // required by the Keyval class implementation
int Datatype::keyval_id_=0; // required by the Keyval class implementation
// General-purpose constructor for user-built datatypes.
// @param size  serialized size of one element, in bytes
// @param lb/ub lower/upper bound of the type (MPI_Aint displacements)
// @param flags combination of DT_FLAG_* bits
// add_f() registers this handle for Fortran interoperability — see F2C::f2c() below.
// (The MC_ignore() calls of the old code were dropped by the patch this hunk came from.)
Datatype::Datatype(int size, MPI_Aint lb, MPI_Aint ub, int flags) : size_(size), lb_(lb), ub_(ub), flags_(flags)
{
  this->add_f();
}
// Constructor for predefined datatypes (created by the CREATE_MPI_DATATYPE macros):
// they are statically allocated, so refcount_ = 0 — they must never be reclaimed.
// Each one is registered in id2type_lookup so Datatype::decode() can map its string id
// back to the handle.
Datatype::Datatype(const char* name, int ident, int size, MPI_Aint lb, MPI_Aint ub, int flags)
    : name_(name), id(std::to_string(ident)), size_(size), lb_(lb), ub_(ub), flags_(flags), refcount_(0)
{
  // try_emplace: keep the first registration should the same id ever be seen twice.
  id2type_lookup.try_emplace(id, this);
}
Datatype::Datatype(Datatype* datatype, int* ret)
flags_ &= ~DT_FLAG_PREDEFINED;
set_contents(MPI_COMBINER_DUP, 0, nullptr, 0, nullptr, 1, &datatype);
- for (auto const& it : datatype->attributes()) {
- auto elem_it = keyvals_.find(it.first);
- xbt_assert(elem_it != keyvals_.end(), "Keyval not found for Datatype: %d", it.first);
+ for (auto const& [key, value] : datatype->attributes()) {
+ auto elem_it = keyvals_.find(key);
+ xbt_assert(elem_it != keyvals_.end(), "Keyval not found for Datatype: %d", key);
smpi_key_elem& elem = elem_it->second;
int ret = MPI_SUCCESS;
int flag = 0;
void* value_out = nullptr;
if (elem.copy_fn.type_copy_fn == MPI_TYPE_DUP_FN) {
- value_out = it.second;
+ value_out = value;
flag = 1;
} else if (elem.copy_fn.type_copy_fn != MPI_NULL_COPY_FN) {
- ret = elem.copy_fn.type_copy_fn(datatype, it.first, elem.extra_state, it.second, &value_out, &flag);
+ ret = elem.copy_fn.type_copy_fn(datatype, key, elem.extra_state, value, &value_out, &flag);
}
+ if (ret != MPI_SUCCESS)
+ return ret;
+
if (elem.copy_fn.type_copy_fn_fort != MPI_NULL_COPY_FN) {
value_out = xbt_new(int, 1);
if (*(int*)*elem.copy_fn.type_copy_fn_fort == 1) { // MPI_TYPE_DUP_FN
- memcpy(value_out, it.second, sizeof(int));
+ memcpy(value_out, value, sizeof(int));
flag = 1;
} else { // not null, nor dup
- elem.copy_fn.type_copy_fn_fort(datatype, it.first, elem.extra_state, it.second, value_out, &flag, &ret);
+ elem.copy_fn.type_copy_fn_fort(datatype, key, elem.extra_state, value, value_out, &flag, &ret);
}
- if (ret != MPI_SUCCESS)
+ if (ret != MPI_SUCCESS) {
xbt_free(value_out);
+ return ret;
+ }
}
- if (ret != MPI_SUCCESS)
- return ret;
if (flag) {
elem.refcount++;
- attributes().emplace(it.first, value_out);
+ attributes().try_emplace(key, value_out);
}
}
return MPI_SUCCESS;
// Increase this datatype's reference count (paired with Datatype::unref()).
// The old MC_ignore() bookkeeping was removed by the patch this hunk came from.
void Datatype::ref()
{
  refcount_++;
}
void Datatype::unref(MPI_Datatype datatype)
// Body of Datatype::unref() (the signature/opening brace are outside this hunk):
// drop one reference and reclaim the object when it reaches zero.
if (datatype->refcount_ > 0)
  datatype->refcount_--;
// Predefined datatypes are static objects (refcount_ 0) and must never be deleted.
if (datatype->refcount_ == 0 && not(datatype->flags_ & DT_FLAG_PREDEFINED))
  delete datatype;
}
return (flags_ & DT_FLAG_BASIC);
}
-bool Datatype::is_replayable() const
-{
- return (simgrid::instr::trace_format == simgrid::instr::TraceFormat::Ti) &&
- ((this == MPI_BYTE) || (this == MPI_DOUBLE) || (this == MPI_INT) || (this == MPI_CHAR) ||
- (this == MPI_SHORT) || (this == MPI_LONG) || (this == MPI_FLOAT));
-}
-
MPI_Datatype Datatype::decode(const std::string& datatype_id)
{
return id2type_lookup.find(datatype_id)->second;
{
// FIXME Handle the case of a partial shared malloc.
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
- }
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
+
/* First check if we really have something to do */
size_t offset = 0;
std::vector<std::pair<size_t, size_t>> private_blocks;
// Interior of Datatype::create_vector (signature and lb/ub setup partly outside this hunk).
  ub=((count-1)*stride+block_length-1)*old_type->get_extent()+old_type->ub();
}
if(old_type->flags() & DT_FLAG_DERIVED || stride != block_length){
  // Gaps between blocks, or a derived base type: a real Type_Vector is required.
  *new_type = new Type_Vector(old_type->size() * block_length * count, lb, ub, DT_FLAG_DERIVED, count, block_length,
                              stride, old_type);
  retval=MPI_SUCCESS;
}else{
  /* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
  *new_type =
      new Datatype(old_type->size() * block_length * count, 0,
                   old_type->size() * ((count - 1) * stride + block_length), DT_FLAG_CONTIGUOUS | DT_FLAG_DERIVED);
  const std::array<int, 3> ints = {{count, block_length, stride}};
  (*new_type)->set_contents(MPI_COMBINER_VECTOR, 3, ints.data(), 0, nullptr, 1, &old_type);
  retval=MPI_SUCCESS;
// Interior of Datatype::create_hvector (stride is in bytes here, not in elements).
  ub=((count-1)*stride)+(block_length-1)*old_type->get_extent()+old_type->ub();
}
if(old_type->flags() & DT_FLAG_DERIVED || stride != block_length*old_type->get_extent()){
  *new_type = new Type_Hvector(old_type->size() * block_length * count, lb, ub, DT_FLAG_DERIVED, count, block_length,
                               stride, old_type);
  retval=MPI_SUCCESS;
}else{
  /* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
  *new_type = new Datatype(old_type->size() * block_length * count, 0, old_type->size() * block_length * count,
                           DT_FLAG_CONTIGUOUS | DT_FLAG_DERIVED);
  const std::array<int, 2> ints = {{count, block_length}};
  (*new_type)->set_contents(MPI_COMBINER_HVECTOR, 2, ints.data(), 1, &stride, 1, &old_type);
  retval=MPI_SUCCESS;
// Interior of Datatype::create_indexed: track ub and detect non-contiguous layouts.
if(indices[i]+block_lengths[i]*old_type->ub()>ub)
  ub = indices[i]+block_lengths[i]*old_type->ub();
// Widen size() to MPI_Aint BEFORE multiplying, so a large block cannot overflow int.
if ((i < count - 1) && (indices[i] + static_cast<MPI_Aint>(old_type->size()) * block_lengths[i] != indices[i + 1]))
  contiguous=false;
}
if (old_type->flags_ & DT_FLAG_DERIVED || lb!=0)
// Interior of Datatype::create_hindexed: track ub and detect non-contiguous layouts.
if (not forced_ub && indices[i] + block_lengths[i] * old_types[i]->ub() > ub)
  ub = indices[i]+block_lengths[i]*old_types[i]->ub();
// Widen size() to MPI_Aint BEFORE the multiplication. Casting the int*int product
// afterwards (as this hunk originally did) is too late: the overflow, if any, has
// already happened in int arithmetic. This matches the indexed-block case above.
if ((i < count - 1) &&
    (indices[i] + static_cast<MPI_Aint>(old_types[i]->size()) * block_lengths[i] != indices[i + 1]))
  contiguous=false;
}
if (not contiguous) {
return static_cast<Datatype*>(F2C::f2c(id));
}
} // namespace simgrid::smpi