typedef SMPI_Errhandler* MPI_Errhandler;
#define MPI_ERRHANDLER_NULL ((MPI_Errhandler)NULL)
+typedef enum SMPI_Combiner_enum{
+ MPI_COMBINER_NAMED,
+ MPI_COMBINER_DUP,
+ MPI_COMBINER_CONTIGUOUS,
+ MPI_COMBINER_VECTOR,
+ MPI_COMBINER_HVECTOR_INTEGER,
+ MPI_COMBINER_HVECTOR,
+ MPI_COMBINER_INDEXED,
+ MPI_COMBINER_HINDEXED_INTEGER,
+ MPI_COMBINER_HINDEXED,
+ MPI_COMBINER_INDEXED_BLOCK,
+ MPI_COMBINER_STRUCT_INTEGER,
+ MPI_COMBINER_STRUCT,
+ MPI_COMBINER_SUBARRAY,
+ MPI_COMBINER_DARRAY,
+ MPI_COMBINER_F90_REAL,
+ MPI_COMBINER_F90_COMPLEX,
+ MPI_COMBINER_F90_INTEGER,
+ MPI_COMBINER_RESIZED,
+ MPI_COMBINER_HINDEXED_BLOCK
+}SMPI_Combiner_enum;
typedef enum SMPI_Topo_type {
MPI_GRAPH=1,
MPI_CALL(XBT_PUBLIC int, MPI_Win_set_errhandler, (MPI_Win win, MPI_Errhandler errhandler));
MPI_CALL(XBT_PUBLIC int, MPI_Win_get_errhandler, (MPI_Win win, MPI_Errhandler* errhandler));
MPI_CALL(XBT_PUBLIC int, MPI_Win_create_errhandler, (MPI_Win_errhandler_fn * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
+MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));
+MPI_CALL(XBT_PUBLIC int, MPI_Type_get_contents,
+ (MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
+ MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes));
+MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
(MPI_Datatype datatype, int* num_integers, int* num_addresses, int* num_datatypes, int* combiner));
MPI_CALL(XBT_PUBLIC int, MPI_File_call_errhandler, (MPI_File fh, int errorcode));
MPI_CALL(XBT_PUBLIC int, MPI_File_create_errhandler,
MPI_CALL(XBT_PUBLIC int, MPI_File_get_errhandler, (MPI_File file, MPI_Errhandler* errhandler));
//FIXME: these are not yet implemented
-typedef enum MPIR_Combiner_enum{
- MPI_COMBINER_NAMED,
- MPI_COMBINER_DUP,
- MPI_COMBINER_CONTIGUOUS,
- MPI_COMBINER_VECTOR,
- MPI_COMBINER_HVECTOR_INTEGER,
- MPI_COMBINER_HVECTOR,
- MPI_COMBINER_INDEXED,
- MPI_COMBINER_HINDEXED_INTEGER,
- MPI_COMBINER_HINDEXED,
- MPI_COMBINER_INDEXED_BLOCK,
- MPI_COMBINER_STRUCT_INTEGER,
- MPI_COMBINER_STRUCT,
- MPI_COMBINER_SUBARRAY,
- MPI_COMBINER_DARRAY,
- MPI_COMBINER_F90_REAL,
- MPI_COMBINER_F90_COMPLEX,
- MPI_COMBINER_F90_INTEGER,
- MPI_COMBINER_RESIZED,
- MPI_COMBINER_HINDEXED_BLOCK
-}MPIR_Combiner_enum;
typedef void* MPI_Message;
#define MPI_DUP_FN 1
(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
MPI_CALL(XBT_PUBLIC int, MPI_Get_elements, (MPI_Status * status, MPI_Datatype datatype, int* elements));
MPI_CALL(XBT_PUBLIC int, MPI_Pcontrol, (const int level, ...));
-MPI_CALL(XBT_PUBLIC int, MPI_Type_get_contents,
- (MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
- MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes));
MPI_CALL(XBT_PUBLIC int, MPI_Type_create_darray,
(int size, int rank, int ndims, int* array_of_gsizes, int* array_of_distribs, int* array_of_dargs,
int* array_of_psizes, int order, MPI_Datatype oldtype, MPI_Datatype* newtype));
WRAPPED_PMPI_CALL(int,MPI_Type_free_keyval,(int* keyval) ,(keyval))
WRAPPED_PMPI_CALL(int,MPI_Type_free,(MPI_Datatype * datatype),(datatype))
WRAPPED_PMPI_CALL(int,MPI_Type_get_attr ,(MPI_Datatype type, int type_keyval, void *attribute_val, int* flag),( type, type_keyval, attribute_val, flag))
+WRAPPED_PMPI_CALL(int,MPI_Type_get_contents,(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses, MPI_Datatype *array_of_datatypes),(datatype, max_integers, max_addresses,max_datatypes, array_of_integers, array_of_addresses, array_of_datatypes))
+WRAPPED_PMPI_CALL(int,MPI_Type_get_envelope,( MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner),(datatype, num_integers, num_addresses, num_datatypes, combiner))
WRAPPED_PMPI_CALL(int,MPI_Type_get_extent,(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent),(datatype, lb, extent))
WRAPPED_PMPI_CALL(int,MPI_Type_get_name,(MPI_Datatype datatype, char * name, int* len),(datatype,name,len))
WRAPPED_PMPI_CALL(int,MPI_Type_get_true_extent,(MPI_Datatype datatype, MPI_Aint * lb, MPI_Aint * extent),(datatype, lb, extent))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Publish_name,( char *service_name, MPI_Info info, char *port_name),( service_name, info, port_name))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Topo_test,(MPI_Comm comm, int* top_type) ,(comm, top_type))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Type_create_darray,(int size, int rank, int ndims, int* array_of_gsizes, int* array_of_distribs, int* array_of_dargs, int* array_of_psizes,int order, MPI_Datatype oldtype, MPI_Datatype *newtype) ,(size, rank, ndims, array_of_gsizes,array_of_distribs, array_of_dargs, array_of_psizes,order,oldtype, newtype))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Type_get_contents,(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses, MPI_Datatype *array_of_datatypes),(datatype, max_integers, max_addresses,max_datatypes, array_of_integers, array_of_addresses, array_of_datatypes))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Type_get_envelope,( MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner),(datatype, num_integers, num_addresses, num_datatypes, combiner))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Type_match_size,(int typeclass,int size,MPI_Datatype *datatype),(typeclass,size,datatype))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Unpack_external,(char *datarep, void *inbuf, MPI_Aint insize, MPI_Aint *position, void *outbuf, int outcount, MPI_Datatype datatype),( datarep, inbuf, insize, position, outbuf, outcount, datatype))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Unpublish_name,( char *service_name, MPI_Info info, char *port_name),( service_name, info, port_name))
return type->attr_put<simgrid::smpi::Datatype>(type_keyval, attribute_val);
}
+int PMPI_Type_get_contents (MPI_Datatype type, int max_integers, int max_addresses,
+ int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
+ MPI_Datatype *array_of_datatypes)
+{
+ CHECK_MPI_NULL(1, MPI_DATATYPE_NULL, MPI_ERR_TYPE, type)
+ CHECK_NEGATIVE(2, MPI_ERR_COUNT, max_integers)
+ CHECK_NEGATIVE(3, MPI_ERR_COUNT, max_addresses)
+ CHECK_NEGATIVE(4, MPI_ERR_COUNT, max_datatypes)
+ if(max_integers>0)
+ CHECK_NULL(5, MPI_ERR_ARG, array_of_integers)
+ if(max_addresses!=0)
+ CHECK_NULL(6, MPI_ERR_ARG, array_of_addresses)
+ if(max_datatypes!=0)
+ CHECK_NULL(7, MPI_ERR_ARG, array_of_datatypes)
+ return type->get_contents(max_integers, max_addresses, max_datatypes,
+ array_of_integers, array_of_addresses, array_of_datatypes);
+}
+
+int PMPI_Type_get_envelope (MPI_Datatype type, int *num_integers, int *num_addresses,
+ int *num_datatypes, int *combiner)
+{
+ CHECK_MPI_NULL(1, MPI_DATATYPE_NULL, MPI_ERR_TYPE, type)
+ CHECK_NULL(2, MPI_ERR_ARG, num_integers)
+ CHECK_NULL(3, MPI_ERR_ARG, num_addresses)
+ CHECK_NULL(4, MPI_ERR_ARG, num_datatypes)
+ CHECK_NULL(5, MPI_ERR_ARG, combiner)
+ return type->get_envelope(num_integers, num_addresses, num_datatypes, combiner);
+}
+
int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
{
CHECK_MPI_NULL(1, MPI_DATATYPE_NULL, MPI_ERR_TYPE, type)
namespace simgrid{
namespace smpi{
+class Datatype_contents {
+ public:
+ int combiner_;
+ int number_of_integers_;
+ int* integers_;
+ int number_of_addresses_;
+ MPI_Aint* addresses_;
+ int number_of_datatypes_;
+ MPI_Datatype* datatypes_;
+ Datatype_contents(int combiner,
+ int number_of_integers, const int* integers,
+ int number_of_addresses, const MPI_Aint* addresses,
+ int number_of_datatypes, const MPI_Datatype* datatypes);
+ ~Datatype_contents();
+};
class Datatype : public F2C, public Keyval{
char* name_ = nullptr;
public:
static std::unordered_map<int, smpi_key_elem> keyvals_;
static int keyval_id_;
+ Datatype_contents* contents_ = nullptr;
Datatype(int id, int size, MPI_Aint lb, MPI_Aint ub, int flags);
Datatype(char* name, int id, int size, MPI_Aint lb, MPI_Aint ub, int flags);
static int keyval_free(int* keyval);
int pack(const void* inbuf, int incount, void* outbuf, int outcount, int* position, const Comm* comm);
int unpack(const void* inbuf, int insize, int* position, void* outbuf, int outcount, const Comm* comm);
-
+ int get_contents(int max_integers, int max_addresses,
+ int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
+ MPI_Datatype *array_of_datatypes);
+ int get_envelope(int* num_integers, int* num_addresses,
+ int* num_datatypes, int* combiner);
static int create_contiguous(int count, MPI_Datatype old_type, MPI_Aint lb, MPI_Datatype* new_type);
static int create_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type);
static int create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type);
}
}
}
+ contents_ = new Datatype_contents(MPI_COMBINER_DUP, 0, nullptr, 0, nullptr, 1, &datatype);
}
Datatype::~Datatype()
}
cleanup_attr<Datatype>();
-
+ delete contents_;
xbt_free(name_);
}
return MPI_SUCCESS;
}
+int Datatype::get_contents (int max_integers, int max_addresses,
+ int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
+ MPI_Datatype *array_of_datatypes)
+{
+ if(contents_==nullptr)
+ return MPI_ERR_ARG;
+ if(max_integers<contents_->number_of_integers_)
+ return MPI_ERR_COUNT;
+ for(int i=0; i<contents_->number_of_integers_; i++){
+ array_of_integers[i]=contents_->integers_[i];
+ }
+ if(max_addresses<contents_->number_of_addresses_)
+ return MPI_ERR_COUNT;
+ for(int i=0; i<contents_->number_of_addresses_; i++){
+ array_of_addresses[i]=contents_->addresses_[i];
+ }
+ if(max_datatypes<contents_->number_of_datatypes_)
+ return MPI_ERR_COUNT;
+ for(int i=0; i<contents_->number_of_datatypes_; i++){
+ array_of_datatypes[i]=contents_->datatypes_[i];
+ contents_->datatypes_[i]->ref();
+ }
+ return MPI_SUCCESS;
+}
+
+int Datatype::get_envelope (int* num_integers, int* num_addresses,
+ int* num_datatypes, int* combiner)
+{
+ if(contents_==nullptr){
+ *combiner = MPI_COMBINER_NAMED;
+ }else{
+ *num_integers = contents_->number_of_integers_;
+ *num_addresses = contents_->number_of_addresses_;
+ *num_datatypes = contents_->number_of_datatypes_;
+ *combiner = contents_->combiner_;
+ }
+ return MPI_SUCCESS;
+}
+
int Datatype::copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
MPI_Datatype recvtype)
{
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
*new_type = new Datatype(count * block_length * old_type->size(), 0, ((count -1) * stride + block_length)*
old_type->size(), DT_FLAG_CONTIGUOUS);
+ int ints[3] = {count, block_length, stride};
+ (*new_type)->contents_ = new Datatype_contents(MPI_COMBINER_VECTOR, 3, ints, 0, nullptr, 1, &old_type);
retval=MPI_SUCCESS;
}
return retval;
}else{
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
*new_type = new Datatype(count * block_length * old_type->size(), 0, count * block_length * old_type->size(), DT_FLAG_CONTIGUOUS);
+ int ints[2] = {count, block_length};
+ (*new_type)->contents_ = new Datatype_contents(MPI_COMBINER_HVECTOR, 2, ints, 1, &stride, 1, &old_type);
retval=MPI_SUCCESS;
}
return retval;
namespace simgrid{
namespace smpi{
+
+Datatype_contents::Datatype_contents(int combiner,
+ int number_of_integers, const int* integers,
+ int number_of_addresses, const MPI_Aint* addresses,
+ int number_of_datatypes, const MPI_Datatype* datatypes)
+: combiner_(combiner), number_of_integers_(number_of_integers),
+ number_of_addresses_(number_of_addresses),
+ number_of_datatypes_(number_of_datatypes)
+{
+ integers_=new int[number_of_integers_];
+ for(int i=0; i<number_of_integers_; i++){
+ integers_[i]=integers[i];
+ }
+ addresses_=new MPI_Aint[number_of_addresses_];
+ for(int i=0; i<number_of_addresses_; i++){
+ addresses_[i]=addresses[i];
+ }
+ datatypes_=new MPI_Datatype[number_of_datatypes_];
+ for(int i=0; i<number_of_datatypes_; i++){
+ datatypes_[i]=datatypes[i];
+ }
+};
+Datatype_contents::~Datatype_contents(){
+ delete[] integers_;
+ delete[] addresses_;
+ delete[] datatypes_;
+}
+
Type_Contiguous::Type_Contiguous(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, MPI_Datatype old_type)
: Datatype(size, lb, ub, flags), block_count_(block_count), old_type_(old_type)
{
+ contents_ = new Datatype_contents(MPI_COMBINER_CONTIGUOUS, 1, &size, 0, nullptr, 1, &old_type);
old_type_->ref();
}
}
Type_Hvector::Type_Hvector(int size,MPI_Aint lb, MPI_Aint ub, int flags, int count, int block_length, MPI_Aint stride, MPI_Datatype old_type): Datatype(size, lb, ub, flags), block_count_(count), block_length_(block_length), block_stride_(stride), old_type_(old_type){
+ int ints[2] = {count, block_length};
+ contents_ = new Datatype_contents(MPI_COMBINER_HVECTOR, 2, ints, 1, &stride, 1, &old_type);
old_type->ref();
}
Type_Hvector::~Type_Hvector(){
MPI_Datatype old_type)
: Type_Hvector(size, lb, ub, flags, count, block_length, stride * old_type->get_extent(), old_type)
{
+ delete contents_;
+ int ints[3] = {count, block_length, stride};
+ contents_ = new Datatype_contents(MPI_COMBINER_VECTOR, 3, ints, 0, nullptr, 1, &old_type);
}
Type_Hindexed::Type_Hindexed(int size, MPI_Aint lb, MPI_Aint ub, int flags, int count, const int* block_lengths,
, block_indices_(new MPI_Aint[count])
, old_type_(old_type)
{
+ int* ints = new int[count+1];
+ ints[0]=count;
+ for(int i=1;i<=count;i++)
+ ints[i]=block_lengths[i];
+ contents_ = new Datatype_contents(MPI_COMBINER_HINDEXED, count+1, ints, count, block_indices, 1, &old_type);
+ delete[] ints;
old_type_->ref();
for (int i = 0; i < count; i++) {
block_lengths_[i] = block_lengths[i];
const int* block_indices, MPI_Datatype old_type)
: Type_Hindexed(size, lb, ub, flags, count, block_lengths, block_indices, old_type, old_type->get_extent())
{
+ delete contents_;
+ int* ints = new int[2*count+1];
+ ints[0]=count;
+ for(int i=1;i<=count;i++)
+ ints[i]=block_lengths[i-1];
+ for(int i=count+1;i<=2*count;i++)
+ ints[i]=block_indices[i-count-1];
+ contents_ = new Datatype_contents(MPI_COMBINER_INDEXED, 2*count+1, ints, 0, nullptr, 1, &old_type);
+ delete[] ints;
}
Type_Struct::Type_Struct(int size, MPI_Aint lb, MPI_Aint ub, int flags, int count, const int* block_lengths,
, block_indices_(new MPI_Aint[count])
, old_types_(new MPI_Datatype[count])
{
+ int* ints = new int[count+1];
+ ints[0]=count;
+ for(int i=1;i<=count;i++)
+ ints[i]=block_lengths[i];
+ contents_ = new Datatype_contents(MPI_COMBINER_INDEXED, count+1, ints, count, block_indices, count, old_types);
for (int i = 0; i < count; i++) {
block_lengths_[i]=block_lengths[i];
block_indices_[i]=block_indices[i];
hindexed_block_contents hvecblklen large_vec localpack longdouble simple-commit simple-pack simple-resized simple-size-extent
struct-derived-zeros struct-ezhov struct-pack struct-verydeep struct-zero-count tfree transpose-pack tresized2
tresized typecommit typefree typelb typename unpack vecblklen zeroblks zeroparms
- subarray subarray-pack)
+ subarray subarray-pack contents)
# not compiled files
- # blockindexed-misc contents darray-cyclic darray-pack get-elements get-elements-pairtype getpartelm get-struct
+ # blockindexed-misc darray-cyclic darray-pack get-elements get-elements-pairtype getpartelm get-struct
# hindexed-zeros indexed-misc large-count large_type large_type_sendrec lbub lots-of-type pairtype-pack
# pairtype-size-extent segtest simple-pack-external simple-pack-external2 sizedtypes slice-pack slice-pack-external struct-empty-el
# struct-no-real-type structpack2 subarray subarray-pack tmatchsize triangular-pack unusual-noncontigs
err = MPI_Type_get_envelope(MPI_FLOAT, &nints, &nadds, &ntypes, &combiner);
- if (combiner != MPI_COMBINER_NAMED)
+ if (combiner != MPI_COMBINER_NAMED || err != MPI_SUCCESS)
errs++;
if (verbose && combiner != MPI_COMBINER_NAMED)
fprintf(stderr, "combiner = %s; should be named\n", combiner_to_string(combiner));
/* decode */
err = MPI_Type_get_envelope(parent_type, &nints, &nadds, &ntypes, &combiner);
- if (nints != 3)
+ if (nints != 3 || err != MPI_SUCCESS)
errs++;
if (nadds != 0)
errs++;
/* decode */
err = MPI_Type_get_envelope(parent_type, &nints, &nadds, &ntypes, &combiner);
- if (nints != 7)
+ if (nints != 7 || err != MPI_SUCCESS)
errs++;
if (nadds != 0)
errs++;
-#needs PMPI_Type_get_envelope, PMPI_Type_get_contents
-#contents 1
+contents 1
gaddress 1
#complex games with negative extents...
#lbub 1