/* pmm - parallel matrix multiplication "double diffusion" */
-/* Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011. The SimGrid Team.
+/* Copyright (c) 2006-2015. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "msg/msg.h"
+
+#include "simgrid/msg.h"
#include "xbt/matrix.h"
#include "xbt/log.h"
+
+// #define BENCH_THIS_CODE /* Will only work from within the source tree, as we require xbt/xbt_os_time.h, which is not public yet */
+#ifdef BENCH_THIS_CODE
#include "xbt/xbt_os_time.h"
+#endif
+
+/** @addtogroup MSG_examples
+ *
+ * - <b>pmm/msg_pmm.c</b>: Parallel Matrix Multiplication is a little
+ * application. This is something that most MPI developers have
+ * written during their class, here implemented using MSG instead
+ * of MPI.
+ */
XBT_LOG_NEW_DEFAULT_CATEGORY(msg_pmm,
"Messages specific for this msg example");
-/* This example should always be executed using a MATRIX_SIZE multiple of
- * GRID_SIZE and with GRID_SIZE^2 nodes. */
-#define MATRIX_SIZE 900
-#define GRID_SIZE 3
+/* This example should always be executed using a deployment of
+ * GRID_SIZE * GRID_SIZE nodes. */
+#define GRID_SIZE 3 /* Modify to adjust the grid's size */
+#define NODE_MATRIX_SIZE 300 /* Amount of work done by each node */
-#define MAILBOX_NAME_SIZE 10
#define GRID_NUM_NODES GRID_SIZE * GRID_SIZE
-#define NODE_MATRIX_SIZE MATRIX_SIZE / GRID_SIZE
+#define MATRIX_SIZE NODE_MATRIX_SIZE * GRID_SIZE
+#define MAILBOX_NAME_SIZE 10
#define NEIGHBOURS_COUNT GRID_SIZE - 1
/*
xbt_matrix_t B;
} s_node_job_t, *node_job_t;
-/**
+/*
* Structure for recovering results
*/
typedef struct s_result {
int k, myid;
char my_mbox[MAILBOX_NAME_SIZE];
node_job_t myjob, jobs[GRID_NUM_NODES];
- xbt_matrix_t A, B, C = NULL, sA, sB, sC;
+ xbt_matrix_t A, B, C, sA, sB, sC;
result_t result;
xbt_assert(argc != 1, "Wrong number of arguments for this node");
broadcast_jobs(jobs + 1);
}else{
+ A = B = C = NULL; /* Avoid warning at compilation */
myjob = wait_job(myid);
}
//xbt_matrix_dump(C, "C:res", 0, xbt_matrix_dump_display_double);
+ xbt_matrix_free(A);
+ xbt_matrix_free(B);
+ xbt_matrix_free(C);
+
/* The rest: return the result to node 0 */
}else{
- m_task_t task;
+ msg_task_t task;
XBT_VERB("Multiplication done. Send the sub-result.");
result->sC =
xbt_matrix_new_sub(sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, NULL);
task = MSG_task_create("result",100,100,result);
- MSG_task_dsend(task, "0", NULL);
+ MSG_task_send(task, "0");
}
/* Clean up and finish*/
+ xbt_matrix_free(sC);
xbt_matrix_free(myjob->A);
xbt_matrix_free(myjob->B);
xbt_free(myjob);
{
int node;
char node_mbox[MAILBOX_NAME_SIZE];
- m_task_t task;
+ msg_task_t task;
msg_comm_t comms[GRID_NUM_NODES - 1] = {0};
XBT_VERB("Broadcast Jobs");
}
MSG_comm_waitall(comms, GRID_NUM_NODES-1, -1);
+ for (node = 1; node < GRID_NUM_NODES; node++)
+ MSG_comm_destroy(comms[node - 1]);
}
static node_job_t wait_job(int selfid)
{
- m_task_t task = NULL;
+ msg_task_t task = NULL;
char self_mbox[MAILBOX_NAME_SIZE];
node_job_t job;
+ msg_error_t err;
snprintf(self_mbox, MAILBOX_NAME_SIZE - 1, "%d", selfid);
- MSG_task_receive(&task, self_mbox);
+ err = MSG_task_receive(&task, self_mbox);
+ if (err != MSG_OK)
+ xbt_die("Error while receiving from %s (%d)", self_mbox, (int)err);
job = (node_job_t)MSG_task_get_data(task);
MSG_task_destroy(task);
XBT_VERB("Got Job (%d,%d)", job->row, job->col);
{
int node;
char node_mbox[MAILBOX_NAME_SIZE];
- m_task_t task;
+ msg_task_t task;
xbt_matrix_t sM;
for(node=0; node < num_nodes; node++){
static void get_sub_matrix(xbt_matrix_t *sM, int selfid)
{
- m_task_t task = NULL;
+ msg_task_t task = NULL;
char node_mbox[MAILBOX_NAME_SIZE];
+ msg_error_t err;
XBT_VERB("Get sub-matrix");
snprintf(node_mbox, MAILBOX_NAME_SIZE - 1, "%d", selfid);
- MSG_task_receive(&task, node_mbox);
+ err = MSG_task_receive(&task, node_mbox);
+ if (err != MSG_OK)
+ xbt_die("Error while receiving from %s (%d)", node_mbox, (int)err);
*sM = (xbt_matrix_t)MSG_task_get_data(task);
MSG_task_destroy(task);
}
static void task_cleanup(void *arg){
- m_task_t task = (m_task_t)arg;
+ msg_task_t task = (msg_task_t)arg;
xbt_matrix_t m = (xbt_matrix_t)MSG_task_get_data(task);
xbt_matrix_free(m);
MSG_task_destroy(task);
*/
int main(int argc, char *argv[])
{
- xbt_os_timer_t timer = xbt_os_timer_new();
+#ifdef BENCH_THIS_CODE
+ xbt_os_cputimer_t timer = xbt_os_timer_new();
+#endif
- MSG_global_init(&argc, argv);
+ MSG_init(&argc, argv);
char **options = &argv[1];
const char* platform_file = options[0];
const char* application_file = options[1];
- MSG_set_channel_number(0);
MSG_create_environment(platform_file);
MSG_function_register("node", node);
MSG_launch_application(application_file);
- xbt_os_timer_start(timer);
- MSG_error_t res = MSG_main();
- xbt_os_timer_stop(timer);
+#ifdef BENCH_THIS_CODE
+ xbt_os_cputimer_start(timer);
+#endif
+ msg_error_t res = MSG_main();
+#ifdef BENCH_THIS_CODE
+ xbt_os_cputimer_stop(timer);
+#endif
XBT_CRITICAL("Simulated time: %g", MSG_get_clock());
- MSG_clean();
-
if (res == MSG_OK)
return 0;
else
static void receive_results(result_t *results){
int node;
msg_comm_t comms[GRID_NUM_NODES-1] = {0};
- m_task_t tasks[GRID_NUM_NODES-1] = {0};
+ msg_task_t tasks[GRID_NUM_NODES-1] = {0};
XBT_VERB("Receive Results.");
}
MSG_comm_waitall(comms, GRID_NUM_NODES - 1, -1);
+ for (node = 1; node < GRID_NUM_NODES; node++)
+ MSG_comm_destroy(comms[node - 1]);
/* Reconstruct the result matrix */
for (node = 1; node < GRID_NUM_NODES; node++){