/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
-/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
return (flags_ & DT_FLAG_BASIC);
}
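+/* Replayable datatypes are the plain fixed-size basic types: they can be
+ * encoded in time-independent traces and reconstructed at replay time from
+ * their size alone. */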
+bool Datatype::is_replayable()
+{
+  return (this == MPI_BYTE) || (this == MPI_DOUBLE) || (this == MPI_INT) ||
+         (this == MPI_CHAR) || (this == MPI_SHORT) || (this == MPI_LONG) ||
+         (this == MPI_FLOAT);
+}
+
size_t Datatype::size(){
return size_;
}
int Datatype::pack(void* inbuf, int incount, void* outbuf, int outcount, int* position,MPI_Comm comm){
if (outcount - *position < incount*static_cast<int>(size_))
return MPI_ERR_BUFFER;
- Datatype::copy(inbuf == MPI_BOTTOM ? nullptr : inbuf, incount, this, static_cast<char*>(outbuf) + *position, outcount,
- MPI_CHAR);
+ Datatype::copy(inbuf, incount, this, static_cast<char*>(outbuf) + *position, outcount, MPI_CHAR);
*position += incount * size_;
return MPI_SUCCESS;
}
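+/* Illustrative MPI-level use of pack() above (a sketch; buffer assumed large
+ * enough):
+ *   int values[10];
+ *   char buf[1024];
+ *   int pos = 0;
+ *   MPI_Pack(values, 10, MPI_INT, buf, sizeof(buf), &pos, MPI_COMM_WORLD);
+ *   // on success, pos has advanced by 10 * sizeof(int) bytes
+ */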
// FIXME Handle the case of a partial shared malloc.
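+/* With mmap-based privatization each process has its own copy of the global
+ * data segment: switch to the current process's mapping before touching
+ * buffers that may point into it. */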
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
/* First check if we really have something to do */
if (recvcount > 0 && recvbuf != sendbuf) {
return MPI_SUCCESS;
}
+int Datatype::create_subarray(int ndims, int* array_of_sizes,
+                              int* array_of_subsizes, int* array_of_starts,
+                              int order, MPI_Datatype oldtype, MPI_Datatype *newtype){
+
+ int i, step, end;
+ MPI_Datatype tmp;
+
+  if (ndims < 1) {
+    XBT_WARN("subarray: ndims must be a positive integer");
+    return MPI_ERR_ARG;
+  }
+  for (i = 0; i < ndims; i++) {
+    if (array_of_subsizes[i] > array_of_sizes[i]) {
+      XBT_WARN("subarray: array_of_subsizes > array_of_sizes for dim %d", i);
+      return MPI_ERR_ARG;
+    }
+    if (array_of_starts[i] + array_of_subsizes[i] > array_of_sizes[i]) {
+      XBT_WARN("subarray: array_of_starts + array_of_subsizes > array_of_sizes for dim %d", i);
+      return MPI_ERR_ARG;
+    }
+  }
+
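+  /* Strategy: start from a vector over the two fastest-varying dimensions,
+   * then wrap one hvector per remaining dimension; a final hindexed+resized
+   * pass sets the start offset, lower bound and extent. */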
+ MPI_Aint extent = oldtype->get_extent();
+
+  if (order == MPI_ORDER_C) {
+    /* Row-major: the last dimension varies fastest, so walk it first. */
+    i    = ndims - 1;
+    step = -1;
+    end  = -1;
+  } else {
+    /* Column-major (MPI_ORDER_FORTRAN): the first dimension varies fastest. */
+    i    = 0;
+    step = 1;
+    end  = ndims;
+  }
+
+  MPI_Aint size;
+  MPI_Aint lb;
+
+  if (ndims == 1) {
+    /* Single dimension: the generic code below would read array_of_sizes[i+step]
+     * out of bounds, so build the single (contiguous) block directly. */
+    size = (MPI_Aint)array_of_sizes[i];
+    lb   = (MPI_Aint)array_of_starts[i];
+    create_vector(1, array_of_subsizes[i], array_of_sizes[i], oldtype, newtype);
+    tmp = *newtype;
+  } else {
+    /* Vector covering the two fastest-varying dimensions. */
+    size = (MPI_Aint)array_of_sizes[i] * (MPI_Aint)array_of_sizes[i+step];
+    lb   = (MPI_Aint)array_of_starts[i] + (MPI_Aint)array_of_starts[i+step] * (MPI_Aint)array_of_sizes[i];
+
+    create_vector(array_of_subsizes[i+step], array_of_subsizes[i], array_of_sizes[i],
+                  oldtype, newtype);
+    tmp = *newtype;
+
+    /* Wrap one hvector per remaining dimension, accumulating the start offset
+     * (lb) and the number of oldtype elements spanned so far (size). */
+    for (i += 2 * step; i != end; i += step) {
+      create_hvector(array_of_subsizes[i], 1, size * extent, tmp, newtype);
+      unref(tmp);
+      lb += size * array_of_starts[i];
+      size *= array_of_sizes[i];
+      tmp = *newtype;
+    }
+  }
+
+  /* Place the block at its start offset, then fix lb and extent. */
+  MPI_Aint lbs[1] = {lb * extent};
+  int sizes[1]    = {1};
+  create_hindexed(1, sizes, lbs, tmp, newtype);
+ unref(tmp);
+
+ tmp = *newtype;
+ create_resized(tmp, 0, extent, newtype);
+
+ unref(tmp);
+ return MPI_SUCCESS;
+}
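+/* Illustrative MPI-level use of create_subarray (a sketch): select a 2x3
+ * block starting at row 1, column 2 of a 4x6 C-ordered array of ints:
+ *   int sizes[2]    = {4, 6};
+ *   int subsizes[2] = {2, 3};
+ *   int starts[2]   = {1, 2};
+ *   MPI_Datatype block;
+ *   MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_INT, &block);
+ *   MPI_Type_commit(&block);
+ */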
+
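+/* Implement MPI_Type_create_resized as a 3-element struct: an MPI_LB marker
+ * pinning the lower bound, the old type itself, and an MPI_UB marker pinning
+ * the upper bound at lb + extent. MPI_LB/MPI_UB are deprecated at the MPI
+ * level but remain convenient internally. */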
+int Datatype::create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
+ int blocks[3] = {1, 1, 1};
+ MPI_Aint disps[3] = {lb, 0, lb + extent};
+ MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
+
+ *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
+
+  // The resized type still has to be committed by the caller (MPI_Type_commit)
+  // before it can be used in communications.
+  (*newtype)->addflag(~DT_FLAG_COMMITED);
+ return MPI_SUCCESS;
+}
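+/* Illustrative MPI-level use of create_resized (a sketch): pad a 3-int type
+ * to a 16-byte extent so consecutive elements are spaced 16 bytes apart:
+ *   MPI_Datatype t, padded;
+ *   MPI_Type_contiguous(3, MPI_INT, &t);
+ *   MPI_Type_create_resized(t, 0, 16, &padded);
+ *   MPI_Type_commit(&padded);
+ */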
+
Datatype* Datatype::f2c(int id){
return static_cast<Datatype*>(F2C::f2c(id));
}