/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/time.h>
#include <mpi.h>
#include "xbt/str.h"

#define ITERATIONS         10 /* timed runs per execution; value assumed, any small count works */
#define USAGE_ERROR        1
#define SANITY_ERROR       2
#define GETTIMEOFDAY_ERROR 3
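/* This example multiplies an N x N matrix of random doubles by a random
 * N-vector, spreading the rows over the MPI ranks with blocking sends and
 * receives, then redoes the product serially on the root to check the
 * parallel result and to report speedup and efficiency. */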
int main(int argc, char *argv[])
{
  int size, rank;
  int N, n, i, j, k, current_iteration, successful_iterations = 0;
  double *matrix = NULL, *vector = NULL, *vcalc, *vcheck;
  MPI_Status status;
  struct timeval *start_time = NULL, *stop_time = NULL;
  long parallel_usecs, parallel_usecs_total = 0, sequential_usecs, sequential_usecs_total = 0;

  MPI_Init(&argc, &argv);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
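  /* Every rank runs this same binary (SPMD): rank 0 acts as the master that
   * owns the full matrix, the timers, and the reference result, while the
   * other ranks only ever see the block of rows that is sent to them. */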
  if (0 == rank) {
    // root node parses cmdline args
    if (2 > argc || !isdigit(*argv[1])) {
      printf("usage:\n%s <size>\n", argv[0]);
      MPI_Abort(MPI_COMM_WORLD, USAGE_ERROR);
      exit(USAGE_ERROR);
    }

    N = xbt_str_parse_int(argv[1], "Invalid size: %s");

    start_time = (struct timeval *) malloc(sizeof(struct timeval));
    stop_time = (struct timeval *) malloc(sizeof(struct timeval));
  }
  for (current_iteration = 0; current_iteration < ITERATIONS; current_iteration++) {
    if (0 == rank) {
      matrix = (double *) malloc(N * N * sizeof(double));
      vector = (double *) malloc(N * sizeof(double));

      for (i = 0; i < N * N; i++) {
        matrix[i] = (double) rand() / ((double) RAND_MAX + 1);
      }

      for (i = 0; i < N; i++) {
        vector[i] = (double) rand() / ((double) RAND_MAX + 1);
      }

      // for the sake of argument, the parallel algorithm begins when the
      // root node begins to transmit the matrix to the workers
      if (-1 == gettimeofday(start_time, NULL)) {
        printf("couldn't set start_time on node 0!\n");
        MPI_Abort(MPI_COMM_WORLD, GETTIMEOFDAY_ERROR);
        exit(GETTIMEOFDAY_ERROR);
      }
      for (i = 1; i < size; i++) {
        MPI_Send(&N, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
      }
    } else {
      MPI_Recv(&N, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
    }
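    /* The send loop above plus the matching single receive is a hand-rolled
     * broadcast; MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD) called on
     * every rank would do the same job. This example deliberately sticks to
     * plain blocking point-to-point calls. */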
    // this algorithm uses at most N processors...
    if (rank < N) {
      if (size > N)
        size = N;

      // number of rows handled locally: ranks below N % size get one extra
      n = N / size + ((rank < (N % size)) ? 1 : 0);
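      /* Worked example of the block distribution: for N = 10 rows on
       * size = 4 ranks, N / size = 2 and N % size = 2, so ranks 0 and 1
       * compute 3 rows each and ranks 2 and 3 compute 2 rows each
       * (3 + 3 + 2 + 2 = 10). */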
      if (0 == rank) {
        // hand each worker its block of rows and a full copy of the vector
        for (i = 1, j = n; i < size && j < N; i++, j += k) {
          k = N / size + ((i < (N % size)) ? 1 : 0);
          MPI_Send(matrix + N * j, N * k, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
          MPI_Send(vector, N, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
        }

        // sanity check: the loop must have visited every rank and every row
        if (i != size || j != N) {
          printf("index calc error: i = %d, size = %d, j = %d, N = %d\n", i, size, j, N);
          MPI_Abort(MPI_COMM_WORLD, SANITY_ERROR);
          exit(SANITY_ERROR);
        }
        vcalc = (double *) malloc(N * sizeof(double));
      } else {
        matrix = (double *) malloc(N * n * sizeof(double));
        vector = (double *) malloc(N * sizeof(double));
        vcalc = (double *) malloc(n * sizeof(double));

        MPI_Recv(matrix, N * n, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(vector, N, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
      }
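      /* Because the matrix is stored row-major, each rank's share is one
       * contiguous run of n rows (n * N doubles), so a single send/receive
       * pair per rank moves it. Rank 0 never copies anything: it computes
       * its own first n rows directly from the full matrix. */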
      // each rank computes the dot product of its own rows with the vector
      for (i = 0; i < n; i++) {
        vcalc[i] = 0.0; // malloc'd memory is not zeroed, so clear before accumulating
        for (j = 0; j < N; j++) {
          vcalc[i] += matrix[N * i + j] * vector[j];
        }
      }

      if (0 != rank) {
        MPI_Send(vcalc, n, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
      } else {
        // collect the partial results in rank order
        for (i = 1, j = n; i < size && j < N; i++, j += k) {
          k = N / size + ((i < (N % size)) ? 1 : 0);
          MPI_Recv(vcalc + j, k, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &status);
        }

        // sanity check: every row of the result must have been received
        if (i != size || j != N) {
          printf("index calc error 2: i = %d, size = %d, j = %d, N = %d\n", i, size, j, N);
          MPI_Abort(MPI_COMM_WORLD, SANITY_ERROR);
          exit(SANITY_ERROR);
        }
        if (-1 == gettimeofday(stop_time, NULL)) {
          printf("couldn't set stop_time on node 0!\n");
          MPI_Abort(MPI_COMM_WORLD, GETTIMEOFDAY_ERROR);
          exit(GETTIMEOFDAY_ERROR);
        }

        parallel_usecs = (stop_time->tv_sec * 1000000 + stop_time->tv_usec) -
            (start_time->tv_sec * 1000000 + start_time->tv_usec);
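        /* Note: tv_sec * 1000000 is evaluated in time_t; if time_t is only
         * 32 bits wide this multiplication overflows, so the subtraction is
         * only reliable where time_t (and long) are 64 bits. */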
        if (-1 == gettimeofday(start_time, NULL)) {
          printf("couldn't set start_time on node 0!\n");
          MPI_Abort(MPI_COMM_WORLD, GETTIMEOFDAY_ERROR);
          exit(GETTIMEOFDAY_ERROR);
        }

        // calculate serially
        vcheck = (double *) malloc(N * sizeof(double));
        for (i = 0; i < N; i++) {
          vcheck[i] = 0.0;
          for (j = 0; j < N; j++) {
            vcheck[i] += matrix[N * i + j] * vector[j];
          }
        }

        if (-1 == gettimeofday(stop_time, NULL)) {
          printf("couldn't set stop_time on node 0!\n");
          MPI_Abort(MPI_COMM_WORLD, GETTIMEOFDAY_ERROR);
          exit(GETTIMEOFDAY_ERROR);
        }

        sequential_usecs = (stop_time->tv_sec * 1000000 + stop_time->tv_usec) -
            (start_time->tv_sec * 1000000 + start_time->tv_usec);
        // verify correctness
        for (i = 0; i < N && vcalc[i] == vcheck[i]; i++) {
          /* empty: stops at the first mismatch, so i == N means all equal */
        }
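        /* Comparing doubles with == is deliberate here: both versions
         * accumulate each row's dot product over j in the same order with
         * the same operands, so matching results should be bit-identical. */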
        printf("prog: blocking, i: %d ", current_iteration);

        if (i == N) {
          // speedup = stime / ptime; efficiency = speedup / nodes
          printf("ptime: %ld us, stime: %ld us, speedup: %.3f, nodes: %d, efficiency: %.3f\n",
                 parallel_usecs, sequential_usecs, (double) sequential_usecs / (double) parallel_usecs, size,
                 (double) sequential_usecs / ((double) parallel_usecs * (double) size));

          parallel_usecs_total += parallel_usecs;
          sequential_usecs_total += sequential_usecs;
          successful_iterations++;
        } else {
          printf("parallel calc != serial calc, ");
        }

        free(vcheck);
      } // end of root's collect/verify branch

      free(matrix);
      free(vector);
      free(vcalc);
    } // end of if (rank < N)
  } // end of iteration loop
  if (0 == rank) {
    printf("prog: blocking, ");
    if (0 < successful_iterations) {
      printf("iterations: %d, avg. ptime: %.3f us, avg. stime: %.3f us, avg. speedup: %.3f, nodes: %d, avg. efficiency: %.3f\n",
             successful_iterations, (double) parallel_usecs_total / (double) successful_iterations,
             (double) sequential_usecs_total / (double) successful_iterations,
             (double) sequential_usecs_total / (double) parallel_usecs_total, size,
             (double) sequential_usecs_total / ((double) parallel_usecs_total * (double) size));
    } else {
      printf("no successful iterations!\n");
    }

    free(start_time);
    free(stop_time);
  }

  MPI_Finalize();

  return 0;
}