ADD_TEST(simdag-test-typed-tasks ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/simdag --cd ${CMAKE_BINARY_DIR}/examples/simdag ${CMAKE_HOME_DIRECTORY}/examples/simdag/test_simdag_typed_tasks.tesh)
ADD_TEST(simdag-test-fail ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/examples/simdag --cd ${CMAKE_HOME_DIRECTORY}/examples/simdag test_simdag_fail.tesh)
ADD_TEST(simdag-test-comm-throttling ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/simdag --cd ${CMAKE_BINARY_DIR}/examples/simdag ${CMAKE_HOME_DIRECTORY}/examples/simdag/test_simdag_comm_throttling.tesh)
+ ADD_TEST(simdag-test-dax ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv bindir=${CMAKE_HOME_DIRECTORY}/examples/simdag/dax --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/smalldax.tesh)
ADD_TEST(simdag-test-dax-cycle ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/simdag/dax --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/simple_dax_with_cycle.tesh)
ADD_TEST(simdag-test-prop ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/simdag --cd ${CMAKE_BINARY_DIR}/examples/simdag ${CMAKE_HOME_DIRECTORY}/examples/simdag/properties/test_prop.tesh)
ADD_TEST(simdag-test-minmin-scheduling ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/examples/simdag/scheduling --cd ${CMAKE_HOME_DIRECTORY}/examples/simdag/scheduling test_minmin.tesh)
ADD_TEST(memcheck-simdag-test-typed-tasks-0 ${CMAKE_BINARY_DIR}/examples/simdag/sd_typed_tasks_test --cfg=network/TCP_gamma:4194304 ${CMAKE_HOME_DIRECTORY}/examples/simdag/2clusters.xml --cd ${CMAKE_BINARY_DIR}/examples/simdag/)
ADD_TEST(memcheck-simdag-test-fail-0 ${CMAKE_BINARY_DIR}/examples/simdag/sd_fail --cd ${CMAKE_HOME_DIRECTORY}/examples/simdag/)
ADD_TEST(memcheck-simdag-test-comm-throttling-0 ${CMAKE_BINARY_DIR}/examples/simdag/sd_comm_throttling --cfg=network/TCP_gamma:4194304 ${CMAKE_HOME_DIRECTORY}/examples/simdag/2clusters.xml --cd ${CMAKE_BINARY_DIR}/examples/simdag/)
+ADD_TEST(memcheck-simdag-test-dax-0 ${CMAKE_BINARY_DIR}/examples/simdag/dax/dax_test --log=no_loc ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/../2clusters.xml ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/smalldax.xml --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax/)
+ADD_TEST(memcheck-simdag-test-dax-1 cat ./smalldax.trace --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax/)
+ADD_TEST(memcheck-simdag-test-dax-2 $ cmake -E remove -f ${srcdir:=.}/smalldax.trace --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax/)
ADD_TEST(memcheck-simdag-test-dax-cycle-0 ${CMAKE_BINARY_DIR}/examples/simdag/dax/dax_test --log=no_loc ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/../2clusters.xml ${CMAKE_HOME_DIRECTORY}/examples/simdag/dax/simple_dax_with_cycle.xml --cd ${CMAKE_BINARY_DIR}/examples/simdag/dax/)
ADD_TEST(memcheck-simdag-test-prop-0 ${CMAKE_BINARY_DIR}/examples/simdag/properties/sd_prop ${CMAKE_HOME_DIRECTORY}/examples/simdag/../platforms/prop.xml --cd ${CMAKE_BINARY_DIR}/examples/simdag/)
ADD_TEST(memcheck-simdag-test-minmin-scheduling-0 ${CMAKE_BINARY_DIR}/examples/simdag/scheduling/minmin_test --log=sd_daxparse.thresh:critical ./simulacrum_7_hosts.xml ./Montage_25.xml --cd ${CMAKE_HOME_DIRECTORY}/examples/simdag/scheduling/)
SD_create_environment(argv[1]);
/* load the DAX file */
- if (!(dax = SD_daxload(argv[2]))){
+ dax = SD_daxload(argv[2]);
+ if (!dax){
+ XBT_ERROR("A problem occurred during DAX parsing (cycle or syntax). Do not continue this test");
free(tracefilename);
- xbt_die("A problem occurred during parsing. The simulation can't continue.");
+ SD_exit();
+ return -1;
}
/* Display all the tasks */
#! ./tesh
p Test the DAX loader with a DAX comprising a cycle.
-! expect signal SIGABRT
+! expect return 255
$ $SG_TEST_EXENV ./dax_test --log=no_loc ${srcdir:=.}/../2clusters.xml ${srcdir:=.}/simple_dax_with_cycle.xml
> [0.000000] [surf_workstation/INFO] surf_workstation_model_init_ptask_L07
> [0.000000] [sd_daxparse/WARNING] the task root is not marked
> [0.000000] [sd_daxparse/WARNING] the task 2@task2 is in a cycle
> [0.000000] [sd_daxparse/WARNING] the task 3@task3 is in a cycle
> [0.000000] [sd_daxparse/ERROR] The DAX described in simple_dax_with_cycle.xml is not a DAG. It contains a cycle.
-> [0.000000] [xbt/CRITICAL] A problem occurred during parsing. The simulation can't continue.
+> [0.000000] [test/ERROR] A problem occurred during DAX parsing (cycle or syntax). Do not continue this test
--- /dev/null
+#! ./tesh
+p Test the DAX loader on a small DAX instance
+
+$ $SG_TEST_EXENV ${bindir:=.}/dax_test --log=no_loc ${srcdir:=.}/../2clusters.xml ${srcdir:=.}/smalldax.xml
+> [0.000000] [surf_workstation/INFO] surf_workstation_model_init_ptask_L07
+> [0.000000] [sd_daxparse/WARNING] Ignoring file o1 size redefinition from 1000000 to 304
+> [0.000000] [sd_daxparse/WARNING] Ignoring file o2 size redefinition from 1000000 to 304
+> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
+> [0.000000] [sd_task/INFO] Displaying task root
+> [0.000000] [sd_task/INFO] - state: schedulable not runnable
+> [0.000000] [sd_task/INFO] - kind: sequential computation
+> [0.000000] [sd_task/INFO] - amount: 0
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 0
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] root_i2_2@task2
+> [0.000000] [sd_task/INFO] root_i1_1@task1
+> [0.000000] [sd_task/INFO] Displaying task 1@task1
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: sequential computation
+> [0.000000] [sd_task/INFO] - amount: 42000000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] root_i1_1@task1
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1
+> [0.000000] [sd_task/INFO] 1@task1_o1_3@task1
+> [0.000000] [sd_task/INFO] Displaying task 2@task2
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: sequential computation
+> [0.000000] [sd_task/INFO] - amount: 42000000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] root_i2_2@task2
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1
+> [0.000000] [sd_task/INFO] 2@task2_o2_3@task1
+> [0.000000] [sd_task/INFO] Displaying task 3@task1
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: sequential computation
+> [0.000000] [sd_task/INFO] - amount: 42000000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 4
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] 1@task1
+> [0.000000] [sd_task/INFO] 2@task2
+> [0.000000] [sd_task/INFO] 1@task1_o1_3@task1
+> [0.000000] [sd_task/INFO] 2@task2_o2_3@task1
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1_o3_end
+> [0.000000] [sd_task/INFO] Displaying task root_i2_2@task2
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: end-to-end communication
+> [0.000000] [sd_task/INFO] - amount: 1000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] root
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 2@task2
+> [0.000000] [sd_task/INFO] Displaying task 1@task1_o1_3@task1
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: end-to-end communication
+> [0.000000] [sd_task/INFO] - amount: 1000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] 1@task1
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1
+> [0.000000] [sd_task/INFO] Displaying task 2@task2_o2_3@task1
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: end-to-end communication
+> [0.000000] [sd_task/INFO] - amount: 1000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] 2@task2
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1
+> [0.000000] [sd_task/INFO] Displaying task 3@task1_o3_end
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: end-to-end communication
+> [0.000000] [sd_task/INFO] - amount: 4167312
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] end
+> [0.000000] [sd_task/INFO] Displaying task root_i1_1@task1
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: end-to-end communication
+> [0.000000] [sd_task/INFO] - amount: 1000000
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] root
+> [0.000000] [sd_task/INFO] - post-dependencies:
+> [0.000000] [sd_task/INFO] 1@task1
+> [0.000000] [sd_task/INFO] Displaying task end
+> [0.000000] [sd_task/INFO] - state: not runnable
+> [0.000000] [sd_task/INFO] - kind: sequential computation
+> [0.000000] [sd_task/INFO] - amount: 0
+> [0.000000] [sd_task/INFO] - Dependencies to satisfy: 1
+> [0.000000] [sd_task/INFO] - pre-dependencies:
+> [0.000000] [sd_task/INFO] 3@task1_o3_end
+> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
+> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
+> [84.185919] [test/INFO] ------------------- Produce the trace file---------------------------
+> [84.185919] [test/INFO] Producing the trace of the run into ./smalldax.trace
+
+$ cat ./smalldax.trace
+> [0.000000] C1-00 compute 0.000000 # root
+> [0.030300] C1-01 compute 42000000000.000000 # 1@task1
+> [0.030300] C1-02 compute 42000000000.000000 # 2@task2
+> [42.060600] C1-03 compute 42000000000.000000 # 3@task1
+> [0.000000] C1-00 send C1-02 1000000.000000 # root_i2_2@task2
+> [0.030300] C1-02 recv C1-00 1000000.000000 # root_i2_2@task2
+> [42.030300] C1-01 send C1-03 1000000.000000 # 1@task1_o1_3@task1
+> [42.060600] C1-03 recv C1-01 1000000.000000 # 1@task1_o1_3@task1
+> [42.030300] C1-02 send C1-03 1000000.000000 # 2@task2_o2_3@task1
+> [42.060600] C1-03 recv C1-02 1000000.000000 # 2@task2_o2_3@task1
+> [84.060600] C1-03 send C1-00 4167312.000000 # 3@task1_o3_end
+> [84.185919] C1-00 recv C1-03 4167312.000000 # 3@task1_o3_end
+> [0.000000] C1-00 send C1-01 1000000.000000 # root_i1_1@task1
+> [0.030300] C1-01 recv C1-00 1000000.000000 # root_i1_1@task1
+> [84.185919] C1-00 compute 0.000000 # end
+
+$ cmake -E remove -f ${srcdir:=.}/smalldax.trace
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- generated: 2008-09-24T14:28:09-07:00 -->
+<!-- generated by: shishir [??] -->
+<adag xmlns="http://pegasus.isi.edu/schema/DAX"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://pegasus.isi.edu/schema/DAX
+ http://pegasus.isi.edu/schema/dax-2.1.xsd"
+ version="2.1" count="1" index="0" name="test" jobCount="25" fileCount="0" childCount="20">
+
+ <job id="1" namespace="SG" name="task1" version="1.0" runtime="10">
+ <uses file="i1" link="input" register="true" transfer="true" optional="false" type="data" size="1000000"/>
+ <uses file="o1" link="output" register="true" transfer="true" optional="false" type="data" size="1000000"/>
+ </job>
+
+ <job id="2" namespace="SG" name="task2" version="1.0" runtime="10">
+ <uses file="i2" link="input" register="true" transfer="true" optional="false" type="data" size="1000000"/>
+ <uses file="o2" link="output" register="true" transfer="true" optional="false" type="data" size="1000000"/>
+ </job>
+
+ <job id="3" namespace="SG" name="task1" version="1.0" runtime="10">
+ <uses file="o1" link="input" register="true" transfer="true" optional="false" type="data" size="304"/>
+ <uses file="o2" link="input" register="true" transfer="true" optional="false" type="data" size="304"/>
+ <uses file="o3" link="output" register="true" transfer="true" optional="false" type="data" size="4167312"/>
+ </job>
+
+<!-- part 3: list of control-flow dependencies (may be empty) -->
+ <child ref="3">
+ <parent ref="1"/>
+ <parent ref="2"/>
+ </child>
+</adag>
add_executable(smpi_traced_simple tracing/smpi_traced_simple.c)
add_executable(ttest01 ttest01.c)
add_executable(vector_test vector_test.c)
+ add_executable(hvector_test hvector_test.c)
+ add_executable(indexed_test indexed_test.c)
+ add_executable(struct_test struct_test.c)
add_executable(mc_bugged1 mc_bugged1.c)
add_executable(mc_bugged2 mc_bugged2.c)
add_executable(smpi_replay replay/replay.c)
target_link_libraries(smpi_traced_simple m simgrid smpi )
target_link_libraries(ttest01 m simgrid smpi )
target_link_libraries(vector_test m simgrid smpi )
+ target_link_libraries(hvector_test m simgrid smpi )
+ target_link_libraries(indexed_test m simgrid smpi )
+ target_link_libraries(struct_test m simgrid smpi )
target_link_libraries(mc_bugged1 m simgrid smpi )
target_link_libraries(mc_bugged2 m simgrid smpi )
target_link_libraries(smpi_replay m simgrid smpi )
${CMAKE_CURRENT_SOURCE_DIR}/barrier.c
${CMAKE_CURRENT_SOURCE_DIR}/bcbench.c
${CMAKE_CURRENT_SOURCE_DIR}/vector_test.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hvector_test.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/indexed_test.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/struct_test.c
${CMAKE_CURRENT_SOURCE_DIR}/replay/replay.c
${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced.c
${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced_simple.c
--- /dev/null
+#include <stdio.h>
+#include "mpi.h"
+#define SIZE 4
+
+int main(int argc, char **argv) {
+
+ int rank, i, j;
+ double a[SIZE][SIZE];
+
+ MPI_Datatype columntype;
+
+ MPI_Init(&argc,&argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ MPI_Type_hvector(SIZE, 1, SIZE*sizeof(double), MPI_DOUBLE, &columntype);
+ MPI_Type_commit(&columntype);
+
+
+ if (rank == 0) {
+ for(i=0; i <SIZE;i++)
+ for(j=0; j <SIZE;j++)
+ a[i][j] = i*SIZE+j;
+ }
+
+ /* only one column is send
+ * this is an exemple for non-contignous data*/
+ MPI_Bcast(a, 1, columntype, 0, MPI_COMM_WORLD);
+
+ for(i=0; i<SIZE; i++){
+ for (j=0; j < SIZE; j++) {
+ printf("rank= %d, a[%d][%d]=%f\n",
+ rank, i, j, a[i][j]);
+ }
+ printf("\n");
+ }
+
+
+
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+#include "mpi.h"
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+ int rank, size, i;
+ MPI_Datatype type, type2;
+ int blocklen[3] = { 2, 3, 1 };
+ int displacement[3] = { 0, 3, 8 };
+ int buffer[27];
+ MPI_Status status;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ if (size < 2)
+ {
+ printf("Please run with 2 processes.\n");
+ MPI_Finalize();
+ return 1;
+ }
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ MPI_Type_contiguous(3, MPI_INT, &type2);
+ MPI_Type_commit(&type2);
+ MPI_Type_indexed(3, blocklen, displacement, type2, &type);
+ MPI_Type_commit(&type);
+
+ if (rank == 0)
+ {
+ for (i=0; i<27; i++)
+ buffer[i] = i;
+ MPI_Send(buffer, 1, type, 1, 123, MPI_COMM_WORLD);
+ }
+
+ if (rank == 1)
+ {
+ for (i=0; i<27; i++)
+ buffer[i] = -1;
+ MPI_Recv(buffer, 1, type, 0, 123, MPI_COMM_WORLD, &status);
+ for (i=0; i<27; i++)
+ printf("buffer[%d] = %d\n", i, buffer[i]);
+ fflush(stdout);
+ }
+
+ MPI_Finalize();
+ return 0;
+}
+
> 12 157.673709 2 1 6
> 13 222.850244 2 3
> 12 222.850244 2 3 4
-> 13 222.850244 2 3
-> 7 222.850244 1 3
+> 13 222.850245 2 3
+> 7 222.850245 1 3
> 13 231.413469 2 2
> 12 231.413469 2 2 4
-> 13 231.413469 2 2
-> 7 231.413469 1 2
> 13 231.413469 2 1
> 12 231.413469 2 1 4
-> 13 231.413469 2 1
-> 7 231.413469 1 1
-
+> 13 231.413470 2 1
+> 7 231.413470 1 1
+> 13 231.413470 2 2
+> 7 231.413470 1 2
$ rm -f ./simgrid.trace
--- /dev/null
+#include <stdio.h>
+#include "mpi.h"
+
+int main( argc, argv )
+int argc;
+char **argv;
+{
+ int rank;
+ struct { int a;int c; double b;int tab[2][3];} value;
+ MPI_Datatype mystruct;
+ int blocklens[3];
+ MPI_Aint indices[3];
+ MPI_Datatype old_types[3], type2;
+ int i,j;
+
+ MPI_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ int tab[2][3]={{1*rank,2*rank,3*rank},{7*rank,8*rank,9*rank}};
+ MPI_Type_contiguous(3, MPI_INT, &type2);
+ MPI_Type_commit(&type2);
+
+ /* One value of each type, and two for the contiguous one */
+ blocklens[0] = 1;
+ blocklens[1] = 1;
+ blocklens[2] = 2;
+ /* The base types */
+ old_types[0] = MPI_INT;
+ old_types[1] = MPI_DOUBLE;
+ old_types[2] = type2;
+ /* The locations of each element */
+ MPI_Address( &value.a, &indices[0] );
+ MPI_Address( &value.b, &indices[1] );
+ MPI_Address( &tab, &indices[2] );
+ /* Make relative */
+ indices[2] = indices[2] - indices[0];
+ indices[1] = indices[1] - indices[0];
+ indices[0] = 0;
+
+ MPI_Type_struct( 3, blocklens, indices, old_types, &mystruct );
+ MPI_Type_commit( &mystruct );
+
+ if (rank == 0){
+ value.a=-2;
+ value.b=8.0;
+ }else{
+ value.a=10000;
+ value.b=5.0;
+ }
+
+ MPI_Bcast( &value, 1, mystruct, 0, MPI_COMM_WORLD );
+
+ printf( "Process %d got %d (-2?) and %lf (8.0?), tab (should be all 0): ", rank, value.a, value.b );
+
+ for(j=0; j<2;j++ )
+ for(i=0; i<3;i++ )
+ printf("%d ", tab[j][i]);
+
+ printf("\n");
+
+
+ /* Clean up the type */
+ MPI_Type_free( &mystruct );
+ MPI_Finalize( );
+ return 0;
+}
#define RECV 0x8
-//*****************************************************************************************
-
// this struct is here to handle the problem of non-contignous data
// for each such structure these function should be implemented (vector
// index hvector hindex struct)
void (*unserialize)(const void * input, void *output, size_t count, void* subtype);
} s_smpi_subtype_t;
-/*one exemple of implementation for the vector is already here*/
-typedef struct s_smpi_mpi_vector{
- s_smpi_subtype_t base;
- size_t block_stride;
- size_t block_length;
- size_t block_count;
- MPI_Datatype old_type;
- size_t size_oldtype;
-} s_smpi_mpi_vector_t;
-
typedef struct s_smpi_mpi_datatype{
size_t size;
/* this let us know if a serialization is required*/
void *substruct;
} s_smpi_mpi_datatype_t;
-
//*****************************************************************************************
typedef struct s_smpi_mpi_request {
void smpi_datatype_free(MPI_Datatype* type);
void smpi_datatype_commit(MPI_Datatype* datatype);
-void unserialize_vector( const void *contiguous_vector,
- void *noncontiguous_vector,
- size_t count,
- void *type);
-
-void serialize_vector( const void *noncontiguous_vector,
- void *contiguous_vector,
- size_t count,
- void *type);
-
-s_smpi_mpi_vector_t* smpi_datatype_vector_create( int block_stride,
- int block_length,
- int block_count,
- MPI_Datatype old_type,
- int size_oldtype);
-
-
void smpi_empty_status(MPI_Status * status);
MPI_Op smpi_op_new(MPI_User_function * function, int commute);
void smpi_op_destroy(MPI_Op op);
}else{
/* in this situation the data are contignous thus it's not
* required to serialize and unserialize it*/
- smpi_datatype_create(new_type, count * (blocklen) *
+ smpi_datatype_create(new_type, count * blocklen *
smpi_datatype_size(old_type),
0,
NULL,
return retval;
}
+
+
+/*
+Hvector Implementation - Vector with stride in bytes
+*/
+
+
+/*
+ * Copies noncontiguous data into contiguous memory.
+ * @param contiguous_hvector - output hvector
+ * @param noncontiguous_hvector - input hvector
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data, in bytes
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void serialize_hvector( const void *noncontiguous_hvector,
+ void *contiguous_hvector,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_hvector_t* type_c = (s_smpi_mpi_hvector_t*)type;
+ int i;
+ char* contiguous_vector_char = (char*)contiguous_hvector;
+ char* noncontiguous_vector_char = (char*)noncontiguous_hvector;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(contiguous_vector_char,
+ noncontiguous_vector_char, type_c->block_length * type_c->size_oldtype);
+
+ contiguous_vector_char += type_c->block_length*type_c->size_oldtype;
+ noncontiguous_vector_char += type_c->block_stride;
+ }
+}
+/*
+ * Copies contiguous data into noncontiguous memory.
+ * @param noncontiguous_vector - output hvector
+ * @param contiguous_vector - input hvector
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data, in bytes
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void unserialize_hvector( const void *contiguous_vector,
+ void *noncontiguous_vector,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_hvector_t* type_c = (s_smpi_mpi_hvector_t*)type;
+ int i;
+
+ char* contiguous_vector_char = (char*)contiguous_vector;
+ char* noncontiguous_vector_char = (char*)noncontiguous_vector;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(noncontiguous_vector_char,
+ contiguous_vector_char, type_c->block_length * type_c->size_oldtype);
+
+ contiguous_vector_char += type_c->block_length*type_c->size_oldtype;
+ noncontiguous_vector_char += type_c->block_stride;
+ }
+}
+
+/*
+ * Create a Sub type vector to be able to serialize and unserialize it
+ * the structre s_smpi_mpi_vector_t is derived from s_smpi_subtype which
+ * required the functions unserialize and serialize
+ *
+ */
+s_smpi_mpi_hvector_t* smpi_datatype_hvector_create( MPI_Aint block_stride,
+ int block_length,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype){
+ s_smpi_mpi_hvector_t *new_t= xbt_new(s_smpi_mpi_hvector_t,1);
+ new_t->base.serialize = &serialize_hvector;
+ new_t->base.unserialize = &unserialize_hvector;
+ new_t->block_stride = block_stride;
+ new_t->block_length = block_length;
+ new_t->block_count = block_count;
+ new_t->old_type = old_type;
+ new_t->size_oldtype = size_oldtype;
+ return new_t;
+}
+
int smpi_datatype_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type)
{
int retval;
if ((old_type->flags & DT_FLAG_COMMITED) != DT_FLAG_COMMITED) {
retval = MPI_ERR_TYPE;
} else {
- /*FIXME: as for the vector the data should be serialized and
- * unserialized moreover a structure derived from s_smpi_subtype should
- * be created*/
- smpi_datatype_create(new_type, count * ((blocklen *
- smpi_datatype_size(old_type))+stride),
- 0,
- NULL,
- DT_FLAG_VECTOR);
- retval=MPI_SUCCESS;
+ if(stride != blocklen*smpi_datatype_size(old_type)){
+ s_smpi_mpi_hvector_t* subtype = smpi_datatype_hvector_create( stride,
+ blocklen,
+ count,
+ old_type,
+ smpi_datatype_size(old_type));
+
+ smpi_datatype_create(new_type, count * blocklen *
+ smpi_datatype_size(old_type),
+ 1,
+ subtype,
+ DT_FLAG_VECTOR);
+ retval=MPI_SUCCESS;
+ }else{
+ smpi_datatype_create(new_type, count * blocklen *
+ smpi_datatype_size(old_type),
+ 0,
+ NULL,
+ DT_FLAG_VECTOR);
+ retval=MPI_SUCCESS;
+ }
}
return retval;
}
+/*
+Indexed Implementation
+*/
+
+/*
+ * Copies noncontiguous data into contiguous memory.
+ * @param contiguous_indexed - output indexed
+ * @param noncontiguous_indexed - input indexed
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void serialize_indexed( const void *noncontiguous_indexed,
+ void *contiguous_indexed,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_indexed_t* type_c = (s_smpi_mpi_indexed_t*)type;
+ int i;
+ char* contiguous_indexed_char = (char*)contiguous_indexed;
+ char* noncontiguous_indexed_char = (char*)noncontiguous_indexed;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(contiguous_indexed_char,
+ noncontiguous_indexed_char, type_c->block_lengths[i] * type_c->size_oldtype);
+
+ contiguous_indexed_char += type_c->block_lengths[i]*type_c->size_oldtype;
+ noncontiguous_indexed_char = (char*)noncontiguous_indexed + type_c->block_indices[i+1]*type_c->size_oldtype;
+ }
+}
+/*
+ * Copies contiguous data into noncontiguous memory.
+ * @param noncontiguous_indexed - output indexed
+ * @param contiguous_indexed - input indexed
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void unserialize_indexed( const void *contiguous_indexed,
+ void *noncontiguous_indexed,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_indexed_t* type_c = (s_smpi_mpi_indexed_t*)type;
+ int i;
+
+ char* contiguous_indexed_char = (char*)contiguous_indexed;
+ char* noncontiguous_indexed_char = (char*)noncontiguous_indexed;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(noncontiguous_indexed_char,
+ contiguous_indexed_char, type_c->block_lengths[i] * type_c->size_oldtype);
+
+ contiguous_indexed_char += type_c->block_lengths[i]*type_c->size_oldtype;
+ noncontiguous_indexed_char = (char*)noncontiguous_indexed + type_c->block_indices[i+1]*type_c->size_oldtype;
+ }
+}
+
+/*
+ * Create a Sub type indexed to be able to serialize and unserialize it
+ * the structre s_smpi_mpi_indexed_t is derived from s_smpi_subtype which
+ * required the functions unserialize and serialize
+ */
+s_smpi_mpi_indexed_t* smpi_datatype_indexed_create( int* block_lengths,
+ int* block_indices,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype){
+ s_smpi_mpi_indexed_t *new_t= xbt_new(s_smpi_mpi_indexed_t,1);
+ new_t->base.serialize = &serialize_indexed;
+ new_t->base.unserialize = &unserialize_indexed;
+ //FIXME : copy those or assume they won't be freed ?
+ new_t->block_lengths = block_lengths;
+ new_t->block_indices = block_indices;
+ new_t->block_count = block_count;
+ new_t->old_type = old_type;
+ new_t->size_oldtype = size_oldtype;
+ return new_t;
+}
+
+
int smpi_datatype_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
{
int i;
if ((old_type->flags & DT_FLAG_COMMITED) != DT_FLAG_COMMITED) {
retval = MPI_ERR_TYPE;
} else {
- /*FIXME: as for the vector the data should be serialized and
- * unserialized moreover a structure derived from s_smpi_subtype should
- * be created*/
- smpi_datatype_create(new_type, (size) *
- smpi_datatype_size(old_type),0, NULL, DT_FLAG_DATA);
+ s_smpi_mpi_indexed_t* subtype = smpi_datatype_indexed_create( blocklens,
+ indices,
+ count,
+ old_type,
+ smpi_datatype_size(old_type));
+
+ smpi_datatype_create(new_type, size *
+ smpi_datatype_size(old_type),1, subtype, DT_FLAG_DATA);
retval=MPI_SUCCESS;
}
return retval;
}
+
+/*
+Hindexed Implementation - Indexed with indices in bytes
+*/
+
+/*
+ * Copies noncontiguous data into contiguous memory.
+ * @param contiguous_hindexed - output hindexed
+ * @param noncontiguous_hindexed - input hindexed
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void serialize_hindexed( const void *noncontiguous_hindexed,
+ void *contiguous_hindexed,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_hindexed_t* type_c = (s_smpi_mpi_hindexed_t*)type;
+ int i;
+ char* contiguous_hindexed_char = (char*)contiguous_hindexed;
+ char* noncontiguous_hindexed_char = (char*)noncontiguous_hindexed;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(contiguous_hindexed_char,
+ noncontiguous_hindexed_char, type_c->block_lengths[i] * type_c->size_oldtype);
+
+ contiguous_hindexed_char += type_c->block_lengths[i]*type_c->size_oldtype;
+ noncontiguous_hindexed_char = (char*)noncontiguous_hindexed + type_c->block_indices[i+1];
+ }
+}
+/*
+ * Copies contiguous data into noncontiguous memory.
+ * @param noncontiguous_hindexed - output hindexed
+ * @param contiguous_hindexed - input hindexed
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void unserialize_hindexed( const void *contiguous_hindexed,
+ void *noncontiguous_hindexed,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_hindexed_t* type_c = (s_smpi_mpi_hindexed_t*)type;
+ int i;
+
+ char* contiguous_hindexed_char = (char*)contiguous_hindexed;
+ char* noncontiguous_hindexed_char = (char*)noncontiguous_hindexed;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(noncontiguous_hindexed_char,
+ contiguous_hindexed_char, type_c->block_lengths[i] * type_c->size_oldtype);
+
+ contiguous_hindexed_char += type_c->block_lengths[i]*type_c->size_oldtype;
+ noncontiguous_hindexed_char = (char*)noncontiguous_hindexed + type_c->block_indices[i+1];
+ }
+}
+
+/*
+ * Create a Sub type hindexed to be able to serialize and unserialize it
+ * the structre s_smpi_mpi_hindexed_t is derived from s_smpi_subtype which
+ * required the functions unserialize and serialize
+ */
+s_smpi_mpi_hindexed_t* smpi_datatype_hindexed_create( int* block_lengths,
+ MPI_Aint* block_indices,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype){
+ s_smpi_mpi_hindexed_t *new_t= xbt_new(s_smpi_mpi_hindexed_t,1);
+ new_t->base.serialize = &serialize_hindexed;
+ new_t->base.unserialize = &unserialize_hindexed;
+ //FIXME : copy those or assume they won't be freed ?
+ new_t->block_lengths = block_lengths;
+ new_t->block_indices = block_indices;
+ new_t->block_count = block_count;
+ new_t->old_type = old_type;
+ new_t->size_oldtype = size_oldtype;
+ return new_t;
+}
+
+
int smpi_datatype_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type)
{
int i;
if ((old_type->flags & DT_FLAG_COMMITED) != DT_FLAG_COMMITED) {
retval = MPI_ERR_TYPE;
} else {
- /*FIXME: as for the vector the data should be serialized and
- * unserialized moreover a structure derived from s_smpi_subtype should
- * be created*/
- smpi_datatype_create(new_type,(size * smpi_datatype_size(old_type)), 0,NULL, DT_FLAG_DATA);
+ s_smpi_mpi_hindexed_t* subtype = smpi_datatype_hindexed_create( blocklens,
+ indices,
+ count,
+ old_type,
+ smpi_datatype_size(old_type));
+
+ smpi_datatype_create(new_type, size *
+ smpi_datatype_size(old_type),1, subtype, DT_FLAG_DATA);
retval=MPI_SUCCESS;
}
return retval;
}
+
+/*
+struct Implementation - Indexed with indices in bytes
+*/
+
+/*
+ * Copies noncontiguous data into contiguous memory.
+ * @param contiguous_struct - output struct
+ * @param noncontiguous_struct - input struct
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void serialize_struct( const void *noncontiguous_struct,
+ void *contiguous_struct,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_struct_t* type_c = (s_smpi_mpi_struct_t*)type;
+ int i;
+ char* contiguous_struct_char = (char*)contiguous_struct;
+ char* noncontiguous_struct_char = (char*)noncontiguous_struct;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(contiguous_struct_char,
+ noncontiguous_struct_char, type_c->block_lengths[i] * smpi_datatype_size(type_c->old_types[i]));
+ contiguous_struct_char += type_c->block_lengths[i]*smpi_datatype_size(type_c->old_types[i]);
+ noncontiguous_struct_char = (char*)noncontiguous_struct + type_c->block_indices[i+1];
+ }
+}
+/*
+ * Copies contiguous data into noncontiguous memory.
+ * @param noncontiguous_struct - output struct
+ * @param contiguous_struct - input struct
+ * @param type - pointer contening :
+ * - stride - stride of between noncontiguous data
+ * - block_length - the width or height of blocked matrix
+ * - count - the number of rows of matrix
+ */
+void unserialize_struct( const void *contiguous_struct,
+ void *noncontiguous_struct,
+ size_t count,
+ void *type)
+{
+ s_smpi_mpi_struct_t* type_c = (s_smpi_mpi_struct_t*)type;
+ int i;
+
+ char* contiguous_struct_char = (char*)contiguous_struct;
+ char* noncontiguous_struct_char = (char*)noncontiguous_struct;
+
+ for (i = 0; i < type_c->block_count * count; i++) {
+ memcpy(noncontiguous_struct_char,
+ contiguous_struct_char, type_c->block_lengths[i] * smpi_datatype_size(type_c->old_types[i]));
+ contiguous_struct_char += type_c->block_lengths[i]*smpi_datatype_size(type_c->old_types[i]);
+ noncontiguous_struct_char = (char*)noncontiguous_struct + type_c->block_indices[i+1];
+ }
+}
+
+/*
+ * Create a Sub type struct to be able to serialize and unserialize it
+ * the structre s_smpi_mpi_struct_t is derived from s_smpi_subtype which
+ * required the functions unserialize and serialize
+ */
+s_smpi_mpi_struct_t* smpi_datatype_struct_create( int* block_lengths,
+ MPI_Aint* block_indices,
+ int block_count,
+ MPI_Datatype* old_types){
+ s_smpi_mpi_struct_t *new_t= xbt_new(s_smpi_mpi_struct_t,1);
+ new_t->base.serialize = &serialize_struct;
+ new_t->base.unserialize = &unserialize_struct;
+ //FIXME : copy those or assume they won't be freed ?
+ new_t->block_lengths = block_lengths;
+ new_t->block_indices = block_indices;
+ new_t->block_count = block_count;
+ new_t->old_types = old_types;
+ return new_t;
+}
+
+
int smpi_datatype_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type)
{
int i;
return MPI_ERR_TYPE;
size += blocklens[i]*smpi_datatype_size(old_types[i]);
}
- /*FIXME: as for the vector the data should be serialized and
- * unserialized moreover a structure derived from s_smpi_subtype should
- * be created*/
- smpi_datatype_create(new_type, size,
- 0, NULL,
- DT_FLAG_DATA);
+
+
+ s_smpi_mpi_struct_t* subtype = smpi_datatype_struct_create( blocklens,
+ indices,
+ count,
+ old_types);
+
+ smpi_datatype_create(new_type, size ,1, subtype, DT_FLAG_DATA);
+
return MPI_SUCCESS;
}
extern MPI_Datatype MPI_PTR;
+
+//*****************************************************************************************
+
+/*
+ These are the structures that handle complex data type information,
+ used for serialization/unserialization of messages
+*/
+
+typedef struct s_smpi_mpi_vector{
+ s_smpi_subtype_t base;
+ size_t block_stride;
+ size_t block_length;
+ size_t block_count;
+ MPI_Datatype old_type;
+ size_t size_oldtype;
+} s_smpi_mpi_vector_t;
+
+typedef struct s_smpi_mpi_hvector{
+ s_smpi_subtype_t base;
+ MPI_Aint block_stride;
+ size_t block_length;
+ size_t block_count;
+ MPI_Datatype old_type;
+ size_t size_oldtype;
+} s_smpi_mpi_hvector_t;
+
+typedef struct s_smpi_mpi_indexed{
+ s_smpi_subtype_t base;
+ int* block_lengths;
+ int* block_indices;
+ size_t block_count;
+ MPI_Datatype old_type;
+ size_t size_oldtype;
+} s_smpi_mpi_indexed_t;
+
+typedef struct s_smpi_mpi_hindexed{
+ s_smpi_subtype_t base;
+ int* block_lengths;
+ MPI_Aint* block_indices;
+ size_t block_count;
+ MPI_Datatype old_type;
+ size_t size_oldtype;
+} s_smpi_mpi_hindexed_t;
+
+typedef struct s_smpi_mpi_struct{
+ s_smpi_subtype_t base;
+ int* block_lengths;
+ MPI_Aint* block_indices;
+ size_t block_count;
+ MPI_Datatype* old_types;
+} s_smpi_mpi_struct_t;
+
+/*
+ Functions to handle serialization/unserialization of messages, 3 for each type of MPI_Type
+ One for creating the substructure to handle, one for serialization, one for unserialization
+*/
+
+void unserialize_vector( const void *contiguous_vector,
+ void *noncontiguous_vector,
+ size_t count,
+ void *type);
+
+void serialize_vector( const void *noncontiguous_vector,
+ void *contiguous_vector,
+ size_t count,
+ void *type);
+
+s_smpi_mpi_vector_t* smpi_datatype_vector_create( int block_stride,
+ int block_length,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype);
+
+void unserialize_hvector( const void *contiguous_vector,
+ void *noncontiguous_vector,
+ size_t count,
+ void *type);
+
+void serialize_hvector( const void *noncontiguous_vector,
+ void *contiguous_vector,
+ size_t count,
+ void *type);
+
+s_smpi_mpi_hvector_t* smpi_datatype_hvector_create( MPI_Aint block_stride,
+ int block_length,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype);
+
+
+void unserialize_indexed( const void *contiguous_indexed,
+ void *noncontiguous_indexed,
+ size_t count,
+ void *type);
+
+void serialize_indexed( const void *noncontiguous_vector,
+ void *contiguous_vector,
+ size_t count,
+ void *type);
+
+s_smpi_mpi_indexed_t* smpi_datatype_indexed_create(int* block_lengths,
+ int* block_indices,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype);
+
+void unserialize_hindexed( const void *contiguous_indexed,
+ void *noncontiguous_indexed,
+ size_t count,
+ void *type);
+
+void serialize_hindexed( const void *noncontiguous_vector,
+ void *contiguous_vector,
+ size_t count,
+ void *type);
+
+s_smpi_mpi_hindexed_t* smpi_datatype_hindexed_create(int* block_lengths,
+ MPI_Aint* block_indices,
+ int block_count,
+ MPI_Datatype old_type,
+ int size_oldtype);
+
+void unserialize_struct( const void *contiguous_indexed,
+ void *noncontiguous_indexed,
+ size_t count,
+ void *type);
+
+void serialize_struct( const void *noncontiguous_vector,
+ void *contiguous_vector,
+ size_t count,
+ void *type);
+
+s_smpi_mpi_struct_t* smpi_datatype_struct_create(int* block_lengths,
+ MPI_Aint* block_indices,
+ int block_count,
+ MPI_Datatype* old_types);
+
#endif