/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
-/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2020. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
{
id = std::to_string(ident);
}
-Datatype::Datatype(int size,MPI_Aint lb, MPI_Aint ub, int flags) : name_(nullptr), size_(size), lb_(lb), ub_(ub), flags_(flags), refcount_(1){
+
+Datatype::Datatype(int size, MPI_Aint lb, MPI_Aint ub, int flags) : size_(size), lb_(lb), ub_(ub), flags_(flags)
+{
#if SIMGRID_HAVE_MC
if(MC_is_active())
MC_ignore(&(refcount_), sizeof(refcount_));
#endif
}
-//for predefined types, so in_use = 0.
+// for predefined types, so refcount_ = 0.
Datatype::Datatype(char* name, int ident, int size, MPI_Aint lb, MPI_Aint ub, int flags)
: name_(name), id(std::to_string(ident)), size_(size), lb_(lb), ub_(ub), flags_(flags), refcount_(0)
{
#endif
}
-Datatype::Datatype(Datatype *datatype, int* ret) : name_(nullptr), size_(datatype->size_), lb_(datatype->lb_), ub_(datatype->ub_), flags_(datatype->flags_), refcount_(1)
+Datatype::Datatype(Datatype* datatype, int* ret)
+ : size_(datatype->size_), lb_(datatype->lb_), ub_(datatype->ub_), flags_(datatype->flags_)
{
flags_ &= ~DT_FLAG_PREDEFINED;
*ret = MPI_SUCCESS;
}
}
}
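+  // Record how this type was built, so that get_envelope()/get_contents() below can report it as a dup.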
+ contents_ = new Datatype_contents(MPI_COMBINER_DUP, 0, nullptr, 0, nullptr, 1, &datatype);
}
-Datatype::~Datatype(){
+Datatype::~Datatype()
+{
xbt_assert(refcount_ >= 0);
if(flags_ & DT_FLAG_PREDEFINED)
}
cleanup_attr<Datatype>();
-
+ delete contents_;
xbt_free(name_);
}
-void Datatype::ref(){
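+// Duplicate this datatype (presumably backing MPI_Type_dup); the copy starts with its own refcount of 1.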
+MPI_Datatype Datatype::clone()
+{
+  int ret = MPI_SUCCESS;
+  return new Datatype(this, &ret);
+}
+void Datatype::ref()
+{
refcount_++;
#if SIMGRID_HAVE_MC
name_ = xbt_strdup(name);
}
-int Datatype::pack(const void* inbuf, int incount, void* outbuf, int outcount, int* position, MPI_Comm)
+int Datatype::pack(const void* inbuf, int incount, void* outbuf, int outcount, int* position, const Comm*)
{
if (outcount - *position < incount*static_cast<int>(size_))
return MPI_ERR_OTHER;
return MPI_SUCCESS;
}
-int Datatype::unpack(const void* inbuf, int insize, int* position, void* outbuf, int outcount, MPI_Comm)
+int Datatype::unpack(const void* inbuf, int insize, int* position, void* outbuf, int outcount, const Comm*)
{
if (outcount*static_cast<int>(size_)> insize)
return MPI_ERR_OTHER;
return MPI_SUCCESS;
}
-int Datatype::copy(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype){
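+// MPI_Type_get_contents: copy out the creation arguments recorded in contents_.
+// The datatype handles returned are ref()'d, since the caller is expected to release them.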
+int Datatype::get_contents(int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
+                           MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes)
+{
+  if (contents_ == nullptr)
+    return MPI_ERR_ARG;
+  if (max_integers < contents_->number_of_integers_)
+    return MPI_ERR_COUNT;
+  for (int i = 0; i < contents_->number_of_integers_; i++) {
+    array_of_integers[i] = contents_->integers_[i];
+  }
+  if (max_addresses < contents_->number_of_addresses_)
+    return MPI_ERR_COUNT;
+  for (int i = 0; i < contents_->number_of_addresses_; i++) {
+    array_of_addresses[i] = contents_->addresses_[i];
+  }
+  if (max_datatypes < contents_->number_of_datatypes_)
+    return MPI_ERR_COUNT;
+  for (int i = 0; i < contents_->number_of_datatypes_; i++) {
+    array_of_datatypes[i] = contents_->datatypes_[i];
+    contents_->datatypes_[i]->ref();
+  }
+  return MPI_SUCCESS;
+}
-// FIXME Handle the case of a partial shared malloc.
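+// MPI_Type_get_envelope: report the combiner and the argument counts needed to call get_contents().
+// Sketch of the expected behavior, assuming a type d built with MPI_Type_dup(MPI_INT, &d):
+//   get_envelope -> num_integers == 0, num_addresses == 0, num_datatypes == 1, combiner == MPI_COMBINER_DUP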
+int Datatype::get_envelope(int* num_integers, int* num_addresses, int* num_datatypes, int* combiner)
+{
+  if (contents_ == nullptr) {
+    // Named (predefined) types carry no construction arguments, so all counts are zero (MPI-3.1 §4.1.13).
+    *num_integers  = 0;
+    *num_addresses = 0;
+    *num_datatypes = 0;
+    *combiner      = MPI_COMBINER_NAMED;
+  } else {
+    *num_integers  = contents_->number_of_integers_;
+    *num_addresses = contents_->number_of_addresses_;
+    *num_datatypes = contents_->number_of_datatypes_;
+    *combiner      = contents_->combiner_;
+  }
+  return MPI_SUCCESS;
+}
+
+int Datatype::copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype)
+{
+ // FIXME Handle the case of a partial shared malloc.
if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
smpi_switch_data_segment(simgrid::s4u::Actor::self());
recvtype->unserialize(sendbuf, recvbuf, count / recvtype->size(), MPI_REPLACE);
} else if (not(recvtype->flags() & DT_FLAG_DERIVED)) {
sendtype->serialize(sendbuf, recvbuf, count / sendtype->size());
- }else{
-
+ } else {
void * buf_tmp = xbt_malloc(count);
sendtype->serialize( sendbuf, buf_tmp,count/sendtype->size());
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
*new_type = new Datatype(count * block_length * old_type->size(), 0, ((count -1) * stride + block_length)*
old_type->size(), DT_FLAG_CONTIGUOUS);
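+    // Remember the vector arguments so get_contents() can return {count, block_length, stride}.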
+ int ints[3] = {count, block_length, stride};
+ (*new_type)->contents_ = new Datatype_contents(MPI_COMBINER_VECTOR, 3, ints, 0, nullptr, 1, &old_type);
retval=MPI_SUCCESS;
}
return retval;
}else{
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
*new_type = new Datatype(count * block_length * old_type->size(), 0, count * block_length * old_type->size(), DT_FLAG_CONTIGUOUS);
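+    // Remember the hvector arguments: the counts as integers, the byte stride as an address.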
+ int ints[2] = {count, block_length};
+ (*new_type)->contents_ = new Datatype_contents(MPI_COMBINER_HVECTOR, 2, ints, 1, &stride, 1, &old_type);
retval=MPI_SUCCESS;
}
return retval;
return MPI_SUCCESS;
}
-Datatype* Datatype::f2c(int id){
+Datatype* Datatype::f2c(int id)
+{
return static_cast<Datatype*>(F2C::f2c(id));
}
-
-
-}
-}
-
+} // namespace smpi
+} // namespace simgrid