6 /* Prototypes for picky compilers */
7 void SetData ( double *, double *, int, int, int, int, int, int );
8 int CheckData ( double *, int, int, int, int, int );
/*
   This is an example of using scatterv to send a matrix from one
   process to all others, with the matrix stored in Fortran order.
   Note the use of an explicit UB to enable the sources to overlap.

   This tests scatterv to make sure that it uses the datatype size
   and extent correctly.  It requires a number of processors that
   can be split with MPI_Dims_create.
 */
/*
 * SetData - fill the root's send buffer with a test pattern and reset
 * the receive buffer on every process.
 *
 * The global matrix is (nx*nrow) x (ny*ncol) doubles, stored in Fortran
 * (column-major) order.  The nx x ny block belonging to the process at
 * grid position (i,j) is filled with the values 1000*j + 100*i + m*nx + k
 * (column m, row k of the block) so that CheckData can tell which block
 * arrived where.  Only the process at (0,0) writes sendbuf; every caller
 * has its recvbuf set to -1.0 so missing data is easy to spot.
 *
 * sendbuf  - root's full matrix, nx*ny*nrow*ncol doubles (unused elsewhere)
 * recvbuf  - this process's block, nx*ny doubles
 * nx, ny   - local block dimensions (rows, columns)
 * myrow, mycol - this process's coordinates in the grid
 * nrow, ncol   - process grid dimensions
 */
void SetData( double *sendbuf, double *recvbuf, int nx, int ny,
              int myrow, int mycol, int nrow, int ncol )
{
    int    coldim, i, j, m, k;
    double *p;

    if (myrow == 0 && mycol == 0) {
        /* Length (in doubles) of one column of the global matrix */
        coldim = nx * nrow;
        for (j=0; j<ncol; j++)
            for (i=0; i<nrow; i++) {
                /* Start of block (i,j) in the column-major global matrix */
                p = sendbuf + i * nx + j * (ny * coldim);
                for (m=0; m<ny; m++) {
                    for (k=0; k<nx; k++) {
                        p[k] = 1000 * j + 100 * i + m * nx + k;
                    }
                    /* Advance to the next column of this block */
                    p += coldim;
                }
            }
    }
    /* Sentinel value: any element still -1.0 after the scatter is an error */
    for (i=0; i<nx*ny; i++)
        recvbuf[i] = -1.0;
}
/*
 * CheckData - verify that this process received its block of the matrix.
 *
 * The received block is nx*ny contiguous doubles in column-major order;
 * the expected value at column m, row k is 1000*mycol + 100*myrow +
 * m*nx + k, matching the pattern written by SetData.  At most the first
 * 9 mismatches are printed, then a single suppression notice.
 *
 * Returns the total number of mismatched elements (0 on success).
 *
 * NOTE(review): nrow is accepted for symmetry with SetData but is not
 * needed here, since the received block is contiguous with stride nx.
 */
int CheckData( double *recvbuf, int nx, int ny, int myrow, int mycol, int nrow )
{
    int    coldim, m, k;
    double *p, val;
    int    errs = 0;

    (void)nrow;   /* unused; see note above */
    /* Columns of the local block are packed back to back */
    coldim = nx;
    p = recvbuf;
    for (m=0; m<ny; m++) {
        for (k=0; k<nx; k++) {
            val = 1000 * mycol + 100 * myrow + m * nx + k;
            if (p[k] != val) {
                errs++;
                if (errs < 10) {
                    printf(
           "Error in (%d,%d) [%d,%d] location, got %f expected %f\n",
                            m, k, myrow, mycol, p[k], val );
                }
                else if (errs == 10) {
                    printf( "Too many errors; suppressing printing\n" );
                }
            }
        }
        /* Next column of the local block */
        p += coldim;
    }
    return errs;
}
/*
 * main: driver for the MPI_Scatterv datatype test.
 *
 * NOTE(review): this is an excerpt of a numbered source listing.  The
 * leading integers are the original file's line numbers; the gaps in
 * that numbering show that several lines are missing from this view
 * (e.g. the declarations of comm2d, sendcounts, scdispls, blens and
 * displs; the assignment of nx and ny; malloc error checks; closing
 * braces; MPI_Finalize).  Comments below describe only what is visible.
 */
int main( int argc, char **argv )
int rank, size, myrow, mycol, nx, ny, stride, cnt, i, j, errs, tot_errs;
double *sendbuf, *recvbuf;
MPI_Datatype vec, block, types[2];
int dims[2], periods[2], coords[2], lcoords[2];
/* Standard MPI startup: initialize, then fetch this rank and world size */
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &size );
/* Get a 2-d decomposition of the processes */
/* dims must be zeroed so MPI_Dims_create chooses both factors */
dims[0] = 0; dims[1] = 0;
MPI_Dims_create( size, 2, dims );
/* Non-periodic grid, reorder = 0, so ranks keep their world order */
periods[0] = 0; periods[1] = 0;
MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &comm2d );
MPI_Cart_get( comm2d, 2, dims, periods, coords );
printf( "Decomposition is [%d x %d]\n", dims[0], dims[1] );
/* Get the size of the matrix */
/* stride: distance in doubles between matrix columns (full column length);
   presumably nx and ny were set on the lines missing just above — verify */
stride = nx * dims[0];
/* Each process receives one contiguous nx x ny block */
recvbuf = (double *)malloc( nx * ny * sizeof(double) );
MPI_Abort( MPI_COMM_WORLD, 1 );
/* Only the root process at grid position (0,0) builds the full matrix
   and the per-rank scatter layout */
if (myrow == 0 && mycol == 0) {
sendbuf = (double *)malloc( nx * ny * size * sizeof(double) );
MPI_Abort( MPI_COMM_WORLD, 1 );
sendcounts = (int *) malloc( size * sizeof(int) );
scdispls = (int *)malloc( size * sizeof(int) );
/* One block: ny columns of nx doubles, columns stride doubles apart */
MPI_Type_vector( ny, nx, stride, MPI_DOUBLE, &vec );
blens[0] = 1; blens[1] = 1;
/* MPI_UB at nx doubles shrinks the type's extent so blocks for adjacent
   grid rows overlap in memory (see the file's header comment); MPI_UB /
   MPI_Type_struct are MPI-1 constructs, deprecated in later standards */
types[0] = vec; types[1] = MPI_UB;
displs[0] = 0; displs[1] = nx * sizeof(double);
MPI_Type_struct( 2, blens, displs, types, &block );
MPI_Type_free( &vec );
MPI_Type_commit( &block );
/* Set up the transfer */
/* Displacements are in units of the block extent (nx doubles):
   lcoords[0] extents down a column plus lcoords[1] block-columns over */
for (i=0; i<dims[1]; i++) {
for (j=0; j<dims[0]; j++) {
/* Using Cart_coords makes sure that ranks (used by
sendrecv) matches the cartesian coordinates (used to
set data in the matrix) */
MPI_Cart_coords( comm2d, cnt, 2, lcoords );
scdispls[cnt++] = lcoords[0] + lcoords[1] * (dims[0] * ny);
/* Every rank resets its recvbuf to sentinels; the root also fills sendbuf */
SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1] );
MPI_Scatterv( sendbuf, sendcounts, scdispls, block,
recvbuf, nx * ny, MPI_DOUBLE, 0, comm2d );
if((errs = CheckData( recvbuf, nx, ny, myrow, mycol, dims[0] ))) {
fprintf( stdout, "Failed to transfer data\n" );
/* Sum per-rank error counts so a single global verdict can be printed */
MPI_Allreduce( &errs, &tot_errs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
printf( "No errors\n" );
printf( "%d errors in use of MPI_SCATTERV\n", tot_errs );
/* NOTE(review): sendbuf is only malloc'd on the root; presumably it is
   initialized to NULL on a line outside this view — confirm */
if (sendbuf) free( sendbuf );
MPI_Type_free( &block );
MPI_Comm_free( &comm2d );