1 /* Copyright (c) 2012, 2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
8 * Block Matrix Multiplication example
13 #include "Matrix_init.h"
17 /*int sched_setaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask);
18 int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask);
26 XBT_LOG_NEW_DEFAULT_CATEGORY(MM_mpi,
27 "Messages specific for this msg example");
31 int main(int argc, char ** argv)
34 size_t m = 1024 , n = 1024 , k = 1024;
36 size_t Block_size = k/NB_Block ;
37 size_t NB_groups = 1, group = 0, key = 0;
46 size_t row, col, size_row, size_col; //description: virtual processor topology
50 MPI_Init(&argc, &argv);
52 /* Find out my identity in the default communicator */
54 MPI_Comm_rank ( MPI_COMM_WORLD, &myrank );
55 MPI_Comm_size ( MPI_COMM_WORLD, &NB_proc );
58 for (size_col=NB_proc/2; NB_proc%size_col; size_col--);
62 size_row = NB_proc/size_col;
63 if (size_row > size_col){
65 size_row = NB_proc/size_col;
78 //get the parameter from command line
79 while ((opt = getopt(argc, argv, "hr:c:M:N:K:B:G:g:k:P:")) != -1) {
83 "Usage: mxm_cblas_test [options]\n"
84 " -M I M size (default: %zu)\n"
85 " -N I N size (default: %zu)\n"
86 " -K I K size (default: %zu)\n"
87 " -B I Block size on the k dimension(default: %zu)\n"
88 " -G I Number of processor groups(default: %zu)\n"
89 " -g I group index(default: %zu)\n"
90 " -k I group rank(default: %zu)\n"
91 " -r I processor row size (default: %zu)\n"
92 " -c I processor col size (default: %zu)\n"
94 m, n, k, Block_size, NB_groups, group, key, row, col);
106 Block_size = atoi(optarg);
109 NB_groups = atoi(optarg);
112 group = atoi(optarg);
118 size_row = atoi(optarg);
121 size_col = atoi(optarg);
124 str_mask = strdup(optarg);
135 // Define the device if we use the GPU
136 //TODO explain parameters
139 two_dot_five( m, k, n, Block_size, group, key,
140 size_row, size_col, NB_groups);
142 // properly close the program
143 MPI_Barrier(MPI_COMM_WORLD);