AND Algorithmique Numérique Distribuée

Public GIT Repository
Merge branch 'master' of git+ssh://scm.gforge.inria.fr//gitroot/simgrid/simgrid
author Frederic Suter <frederic.suter@cc.in2p3.fr>
Fri, 26 Feb 2016 12:32:49 +0000 (13:32 +0100)
committer Frederic Suter <frederic.suter@cc.in2p3.fr>
Fri, 26 Feb 2016 12:32:49 +0000 (13:32 +0100)
19 files changed:
examples/msg/actions/CMakeLists.txt
examples/msg/actions/actions.tesh
examples/msg/actions/actions.txt [deleted file]
examples/msg/actions/actions_allReduce.txt [deleted file]
examples/msg/actions/actions_barrier.txt [deleted file]
examples/msg/actions/actions_bcast.txt [deleted file]
examples/msg/actions/actions_reduce.txt [deleted file]
examples/msg/actions/actions_with_isend.txt [deleted file]
examples/msg/actions/deployment_split.xml
examples/msg/actions/mpi_actions_shared.txt [new file with mode: 0644]
examples/msg/actions/mpi_actions_split_p0.txt [moved from examples/msg/actions/actions_split_p0.txt with 100% similarity]
examples/msg/actions/mpi_actions_split_p1.txt [moved from examples/msg/actions/actions_split_p1.txt with 100% similarity]
examples/msg/actions/storage_actions.txt [moved from examples/msg/actions/actions_io.txt with 100% similarity]
examples/msg/chord/CMakeLists.txt
examples/msg/chord/run_chord [deleted file]
examples/msg/pmm/CMakeLists.txt
examples/msg/pmm/msg_pmm.c
examples/msg/pmm/pmm.tesh
examples/msg/pmm/pmm_9_deploy.xml [deleted file]

diff --git a/examples/msg/actions/CMakeLists.txt b/examples/msg/actions/CMakeLists.txt
index 29a83d0..e6a2fca 100644 (file)
@@ -20,13 +20,8 @@ set(examples_src
   PARENT_SCOPE)
 set(txt_files
   ${txt_files}
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_io.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_allReduce.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_barrier.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_bcast.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_reduce.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_split_p0.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_split_p1.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions.txt
-  ${CMAKE_CURRENT_SOURCE_DIR}/actions_with_isend.txt
+  ${CMAKE_CURRENT_SOURCE_DIR}/mpi_actions_shared.txt
+  ${CMAKE_CURRENT_SOURCE_DIR}/mpi_actions_split_p0.txt
+  ${CMAKE_CURRENT_SOURCE_DIR}/mpi_actions_split_p1.txt
+  ${CMAKE_CURRENT_SOURCE_DIR}/storage_actions.txt
   PARENT_SCOPE)
diff --git a/examples/msg/actions/actions.tesh b/examples/msg/actions/actions.tesh
index 0873dae..bce14de 100644 (file)
@@ -11,81 +11,32 @@ $ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/
 > [ 32.703314] (0:maestro@) Simulation time 32.7033
 
 ! output sort 19
-$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_allReduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> WARNING: THIS BINARY IS KINDA DEPRECATED
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
-> [  0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
-> [  7.171139] (2:p1@Ruby) p1 allReduce 5e8 5e8 7.171139
-> [  7.171139] (3:p2@Perl) p2 allReduce 5e8 5e8 7.171139
-> [  7.171139] (1:p0@Tremblay) p0 allReduce 5e8 5e8 7.171139
-> [ 12.268239] (2:p1@Ruby) p1 compute 5e8 5.097100
-> [ 12.268239] (1:p0@Tremblay) p0 compute 5e8 5.097100
-> [ 12.268239] (3:p2@Perl) p2 compute 5e8 5.097100
-> [ 12.268239] (0:maestro@) Simulation time 12.2682
-
-! output sort 19
-$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_barrier.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> WARNING: THIS BINARY IS KINDA DEPRECATED
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
-> [  0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
-> [  0.000000] (2:p1@Ruby) p1 comm_size 3 0.000000
-> [  0.000000] (3:p2@Perl) p2 comm_size 3 0.000000
-> [  0.022653] (2:p1@Ruby) p1 recv p0 0.022653
-> [  0.022653] (1:p0@Tremblay) p0 send p1 1E7 0.022653
-> [  0.063430] (3:p2@Perl) p2 compute 4E6 0.040777
-> [  0.063430] (2:p1@Ruby) p1 compute 4E6 0.040777
-> [  0.068527] (1:p0@Tremblay) p0 compute 4.5E6 0.045874
-> [  0.068527] (0:maestro@) Simulation time 0.0685268
-
-! output sort 19
-$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_bcast.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml mpi_actions_shared.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
 > This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
 > [  0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
+> [  1.037020] (1:p0@Tremblay) p0 bcast 5e8 1.037020
 > [  1.037020] (2:p1@Ruby) p1 bcast 5e8 1.037020
 > [  1.037020] (3:p2@Perl) p2 bcast 5e8 1.037020
-> [  1.037020] (1:p0@Tremblay) p0 bcast 5e8 1.037020
-> [  3.075860] (2:p1@Ruby) p1 compute 2e8 2.038840
-> [  6.134119] (1:p0@Tremblay) p0 compute 5e8 5.097100
-> [  6.134119] (3:p2@Perl) p2 compute 5e8 5.097100
-> [  7.171139] (2:p1@Ruby) p1 bcast 5e8 4.095279
-> [  7.171139] (3:p2@Perl) p2 bcast 5e8 1.037020
-> [  7.171139] (1:p0@Tremblay) p0 bcast 5e8 1.037020
-> [  9.209979] (2:p1@Ruby) p1 compute 2e8 2.038840
-> [ 12.268239] (1:p0@Tremblay) p0 compute 5e8 5.097100
-> [ 12.268239] (3:p2@Perl) p2 compute 5e8 5.097100
-> [ 13.305258] (2:p1@Ruby) p1 reduce 5e8 5e8 4.095279
-> [ 13.305258] (3:p2@Perl) p2 reduce 5e8 5e8 1.037020
-> [ 18.402358] (1:p0@Tremblay) p0 reduce 5e8 5e8 6.134119
-> [ 18.402358] (0:maestro@) Simulation time 18.4024
-
-! output sort 19
-$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_reduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> WARNING: THIS BINARY IS KINDA DEPRECATED
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
-> [  0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
-> [  1.037020] (2:p1@Ruby) p1 reduce 5e8 5e8 1.037020
-> [  1.037020] (3:p2@Perl) p2 reduce 5e8 5e8 1.037020
-> [  6.134119] (2:p1@Ruby) p1 compute 5e8 5.097100
-> [  6.134119] (1:p0@Tremblay) p0 reduce 5e8 5e8 6.134119
-> [  6.134119] (3:p2@Perl) p2 compute 5e8 5.097100
-> [ 11.231219] (1:p0@Tremblay) p0 compute 5e8 5.097100
-> [ 11.231219] (0:maestro@) Simulation time 11.2312
-
-! output sort 19
-$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_with_isend.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> WARNING: THIS BINARY IS KINDA DEPRECATED
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
-> [  0.000000] (2:p1@Ruby) p1 Irecv p0 0.000000
-> [  0.000000] (3:p2@Perl) p2 Irecv p1 0.000000
-> [  2.072088] (1:p0@Tremblay) p0 send p1 1e9 2.072088
-> [  5.097100] (3:p2@Perl) p2 compute 5e8 5.097100
-> [ 10.194200] (2:p1@Ruby) p1 compute 1e9 10.194200
-> [ 10.194200] (2:p1@Ruby) p1 wait 0.000000
-> [ 12.266287] (1:p0@Tremblay) p0 compute 1e9 10.194200
-> [ 12.266287] (3:p2@Perl) p2 wait 7.169187
-> [ 12.266287] (2:p1@Ruby) p1 send p2 1e9 2.072088
-> [ 12.266287] (3:p2@Perl) p2 Isend p0 1e9 0.000000
-> [ 14.338375] (1:p0@Tremblay) p0 recv p2 2.072088
-> [ 17.363387] (3:p2@Perl) p2 compute 5e8 5.097100
-> [ 17.363387] (0:maestro@) Simulation time 17.3634
+> [  1.082894] (1:p0@Tremblay) p0 compute 4.5E6 0.045874
+> [  1.123670] (1:p0@Tremblay) p0 compute 4E6 0.040777
+> [  1.149156] (1:p0@Tremblay) p0 compute 2.5E6 0.025485
+> [  1.149156] (2:p1@Ruby) p1 Irecv p0 0.000000
+> [  1.149156] (3:p2@Perl) p2 Irecv p1 0.000000
+> [  3.221244] (1:p0@Tremblay) p0 send p1 1e9 2.072088
+> [  6.246256] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 11.343355] (2:p1@Ruby) p1 compute 1e9 10.194200
+> [ 11.343355] (2:p1@Ruby) p1 wait 0.000000
+> [ 11.343355] (2:p1@Ruby) p1 Isend p2 1e9 0.000000
+> [ 13.415443] (1:p0@Tremblay) p0 compute 1e9 10.194200
+> [ 13.415443] (3:p2@Perl) p2 wait 7.169187
+> [ 14.452463] (2:p1@Ruby) p1 reduce 5e8 5e8 1.037020
+> [ 14.452463] (3:p2@Perl) p2 reduce 5e8 5e8 1.037020
+> [ 19.549562] (1:p0@Tremblay) p0 reduce 5e8 5e8 6.134119
+> [ 19.549562] (2:p1@Ruby) p1 compute 5e8 5.097100
+> [ 19.549562] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 24.646662] (1:p0@Tremblay) p0 compute 5e8 5.097100
+> [ 31.817801] (0:maestro@) Simulation time 31.8178
+> [ 31.817801] (1:p0@Tremblay) p0 allReduce 5e8 5e8 7.171139
+> [ 31.817801] (2:p1@Ruby) p1 allReduce 5e8 5e8 7.171139
+> [ 31.817801] (3:p2@Perl) p2 allReduce 5e8 5e8 7.171139
diff --git a/examples/msg/actions/actions.txt b/examples/msg/actions/actions.txt
deleted file mode 100644 (file)
index 402a0e5..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# sample action file
-tutu send toto 1e10
-toto recv tutu
-tutu sleep 12
-toto compute 12
diff --git a/examples/msg/actions/actions_allReduce.txt b/examples/msg/actions/actions_allReduce.txt
deleted file mode 100644 (file)
index 7dbc0b7..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-p0 init
-p1 init
-p2 init
-
-p0 comm_size 3
-p0 allReduce 5e8 5e8
-p1 allReduce 5e8 5e8
-p2 allReduce 5e8 5e8
-
-p0 compute 5e8
-p1 compute 5e8
-p2 compute 5e8
-
-p0 finalize
-p1 finalize
-p2 finalize
diff --git a/examples/msg/actions/actions_barrier.txt b/examples/msg/actions/actions_barrier.txt
deleted file mode 100644 (file)
index f13e229..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-p0 init
-p1 init
-p2 init
-
-p0 comm_size 3
-p0 send p1 1E7
-p0 barrier
-p0 compute 4.5E6
-
-p1 comm_size 3
-p1 recv p0
-p1 barrier
-p1 compute 4E6
-
-p2 comm_size 3
-p2 barrier
-p2 compute 4E6
-
-p0 finalize
-p1 finalize
-p2 finalize
diff --git a/examples/msg/actions/actions_bcast.txt b/examples/msg/actions/actions_bcast.txt
deleted file mode 100644 (file)
index efcf6f6..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-p0 init
-p1 init
-p2 init
-
-p0 comm_size 3
-p0 bcast 5e8
-p1 bcast 5e8
-p2 bcast 5e8
-
-p0 compute 5e8
-p1 compute 2e8
-p2 compute 5e8
-
-p0 bcast 5e8
-p1 bcast 5e8
-p2 bcast 5e8
-
-p0 compute 5e8
-p1 compute 2e8
-p2 compute 5e8
-
-p0 reduce 5e8 5e8
-p1 reduce 5e8 5e8
-p2 reduce 5e8 5e8
-
-p0 finalize
-p1 finalize
-p2 finalize
diff --git a/examples/msg/actions/actions_reduce.txt b/examples/msg/actions/actions_reduce.txt
deleted file mode 100644 (file)
index c2ac680..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-p0 init
-p1 init
-p2 init
-
-p0 comm_size 3
-p0 reduce 5e8 5e8
-p1 reduce 5e8 5e8
-p2 reduce 5e8 5e8
-
-p0 compute 5e8
-p1 compute 5e8
-p2 compute 5e8
-
-p0 finalize
-p1 finalize
-p2 finalize
diff --git a/examples/msg/actions/actions_with_isend.txt b/examples/msg/actions/actions_with_isend.txt
deleted file mode 100644 (file)
index f486fcb..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# sample action file
-p0 init
-p1 init
-p2 init
-
-p0 send p1 1e9
-p0 compute 1e9
-p0 recv p2
-
-p1 Irecv p0
-p1 compute 1e9
-p1 wait
-p1 send p2 1e9
-
-p2 Irecv p1
-p2 compute 5e8
-p2 wait
-p2 Isend p0 1e9
-p2 compute 5e8
-
-p0 finalize
-p1 finalize
-p2 finalize
-
diff --git a/examples/msg/actions/deployment_split.xml b/examples/msg/actions/deployment_split.xml
index f028595..c73bd33 100644 (file)
@@ -6,9 +6,9 @@
          ./actions homogeneous_3_hosts.xml deployment_split.xml  -->
 
   <process host="Tremblay" function="p0">
-    <argument value="actions_split_p0.txt"/>
+    <argument value="mpi_actions_split_p0.txt"/>
   </process>
   <process host="Ruby" function="p1">
-    <argument value="actions_split_p1.txt"/>
+    <argument value="mpi_actions_split_p1.txt"/>
   </process>
 </platform>
diff --git a/examples/msg/actions/mpi_actions_shared.txt b/examples/msg/actions/mpi_actions_shared.txt
new file mode 100644 (file)
index 0000000..088c23b
--- /dev/null
@@ -0,0 +1,54 @@
+# sample action file
+p0 init
+p1 init
+p2 init
+
+p0 comm_size 3
+p0 bcast 5e8
+p1 bcast 5e8
+p2 bcast 5e8
+
+p0 compute 4.5E6
+p0 compute 4E6
+p0 compute 2.5E6
+
+p0 barrier
+p1 barrier
+p2 barrier
+
+p0 send p1 1e9
+p0 compute 1e9
+
+p1 Irecv p0
+p1 compute 1e9
+p1 wait
+p1 Isend p2 1e9
+
+p2 Irecv p1
+p2 compute 5e8
+p2 wait
+
+p0 barrier
+p1 barrier
+p2 barrier
+
+p0 reduce 5e8 5e8
+p1 reduce 5e8 5e8
+p2 reduce 5e8 5e8
+
+p0 compute 5e8
+p1 compute 5e8
+p2 compute 5e8
+
+p0 barrier
+p1 barrier
+p2 barrier
+
+p0 allReduce 5e8 5e8
+p1 allReduce 5e8 5e8
+p2 allReduce 5e8 5e8
+
+p0 finalize
+p1 finalize
+p2 finalize
+
diff --git a/examples/msg/chord/CMakeLists.txt b/examples/msg/chord/CMakeLists.txt
index c0a4dd8..a931a21 100644 (file)
@@ -18,5 +18,4 @@ set(examples_src
 set(bin_files
   ${bin_files}
   ${CMAKE_CURRENT_SOURCE_DIR}/generate.py
-  ${CMAKE_CURRENT_SOURCE_DIR}/run_chord
   PARENT_SCOPE)
diff --git a/examples/msg/chord/run_chord b/examples/msg/chord/run_chord
deleted file mode 100755 (executable)
index 8c3049b..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-# This script runs Chord with the specified number of nodes.
-# Usage: ./run_chord nb_nodes
-# The platform file is chosen as the smallest platform of the files
-# examples/platforms/cluster_with_*_hosts.xml that contains enough nodes.
-
-# check arguments number
-if [ $# != 1 ]; then
-  echo "Usage: ./run_chord nb_nodes"
-  exit 1
-fi
-
-# pick an appropriate platform file
-if [ $1 -le 100 ]; then
-  platform=100
-else
-  if [ $1 -le 1000 ]; then
-    platform=1000
-  else
-    if [ $1 -le 10000 ]; then
-      platform=10000
-    else
-      if [ $1 -le 100000 ]; then
-       platform=100000
-      else
-       if [ $1 -le 300000 ]; then
-         platform=300000
-       else
-         echo "Cannot find a platform file large enough."
-         exit 2
-       fi
-      fi
-    fi
-  fi
-fi
-
-# set up the deployment file
-nb_lines=$[ $1 + 3]
-head -n $nb_lines chord300000.xml > tmp_depl.xml
-echo "</platform>" >> tmp_depl.xml
-
-# run Chord
-./chord -nb_bits=24 -timeout=50 "../../platforms/cluster_with_${platform}_hosts.xml" tmp_depl.xml
-
-# cleanup
-# rm tmp_depl.xml
-
diff --git a/examples/msg/pmm/CMakeLists.txt b/examples/msg/pmm/CMakeLists.txt
index 330bf98..9159113 100644 (file)
@@ -5,10 +5,6 @@ set(tesh_files
   ${tesh_files}
   ${CMAKE_CURRENT_SOURCE_DIR}/pmm.tesh
   PARENT_SCOPE)
-set(xml_files
-  ${xml_files}
-  ${CMAKE_CURRENT_SOURCE_DIR}/pmm_9_deploy.xml
-  PARENT_SCOPE)
 set(examples_src
   ${examples_src}
   ${CMAKE_CURRENT_SOURCE_DIR}/msg_pmm.c
diff --git a/examples/msg/pmm/msg_pmm.c b/examples/msg/pmm/msg_pmm.c
index f1e02aa..587742a 100644 (file)
@@ -9,22 +9,15 @@
 #include "simgrid/msg.h"
 #include "xbt/matrix.h"
 #include "xbt/log.h"
-
-// #define BENCH_THIS_CODE /* Will only work from within the source tree as we require xbt/xbt_os_time.h, that is not public yet) */
-#ifdef BENCH_THIS_CODE
 #include "xbt/xbt_os_time.h"
-#endif
 
 /** @addtogroup MSG_examples
  * 
- * - <b>pmm/msg_pmm.c</b>: Parallel Matrix Multiplication is a little
- *   application. This is something that most MPI developper have
- *   written during their class, here implemented using MSG instead
- *   of MPI. 
+ * - <b>pmm/msg_pmm.c</b>: Parallel Matrix Multiplication is a little application. This is something that most MPI
+ *   developers have written during their class, here implemented using MSG instead of MPI.
  */
 
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_pmm,
-                             "Messages specific for this msg example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(msg_pmm, "Messages specific for this msg example");
 
 /* This example should always be executed using a deployment of
  * GRID_SIZE * GRID_SIZE nodes. */
@@ -144,15 +137,12 @@ int node(int argc, char **argv)
     receive_results(results);
 
     /* First add our results */
-    xbt_matrix_copy_values(C, sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
-                           0, 0, 0, 0, NULL);
+    xbt_matrix_copy_values(C, sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, 0, 0, NULL);
 
     /* Reconstruct the rest of the result matrix */
     for (node = 1; node < GRID_NUM_NODES; node++){
-      xbt_matrix_copy_values(C, results[node]->sC,
-                             NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
-                             NODE_MATRIX_SIZE * results[node]->row,
-                             NODE_MATRIX_SIZE * results[node]->col,
+      xbt_matrix_copy_values(C, results[node]->sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
+                             NODE_MATRIX_SIZE * results[node]->row, NODE_MATRIX_SIZE * results[node]->col,
                              0, 0, NULL);
       xbt_matrix_free(results[node]->sC);
       xbt_free(results[node]);
@@ -173,8 +163,7 @@ int node(int argc, char **argv)
     result = xbt_new0(s_result_t, 1);
     result->row = myjob->row;
     result->col = myjob->col;
-    result->sC =
-      xbt_matrix_new_sub(sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, NULL);
+    result->sC = xbt_matrix_new_sub(sC, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, 0, 0, NULL);
     task = MSG_task_create("result",100,100,result);
     MSG_task_send(task, "0");
   }
@@ -217,8 +206,7 @@ static node_job_t wait_job(int selfid)
   msg_error_t err;
   snprintf(self_mbox, MAILBOX_NAME_SIZE - 1, "%d", selfid);
   err = MSG_task_receive(&task, self_mbox);
-  xbt_assert(err == MSG_OK, "Error while receiving from %s (%d)",
-             self_mbox, (int)err);
+  xbt_assert(err == MSG_OK, "Error while receiving from %s (%d)", self_mbox, (int)err);
   job = (node_job_t)MSG_task_get_data(task);
   MSG_task_destroy(task);
   XBT_VERB("Got Job (%d,%d)", job->row, job->col);
@@ -240,7 +228,6 @@ static void broadcast_matrix(xbt_matrix_t M, int num_nodes, int *nodes)
     MSG_task_dsend(task, node_mbox, task_cleanup);
     XBT_DEBUG("sub-matrix sent to %s", node_mbox);
   }
-
 }
 
 static void get_sub_matrix(xbt_matrix_t *sM, int selfid)
@@ -266,9 +253,6 @@ static void task_cleanup(void *arg){
   MSG_task_destroy(task);
 }
 
-/**
- * \brief Main function.
- */
 int main(int argc, char *argv[])
 {
 #ifdef BENCH_THIS_CODE
@@ -277,14 +261,18 @@ int main(int argc, char *argv[])
 
   MSG_init(&argc, argv);
 
-  char **options = &argv[1];
-  const char* platform_file = options[0];
-  const char* application_file = options[1];
-
-  MSG_create_environment(platform_file);
+  MSG_create_environment(argv[1]);
 
   MSG_function_register("node", node);
-  MSG_launch_application(application_file);
+  for(int i = 0 ; i< 9; i++) {
+    char *hostname = bprintf("node-%d.acme.org", i);
+    char **argvF = xbt_new(char *, 3);
+    argvF[0] = xbt_strdup("node");
+    argvF[1] = bprintf("%d", i);
+    argvF[2] = NULL;
+    MSG_process_create_with_arguments("node", node, NULL, MSG_host_by_name(hostname), 2, argvF);
+    xbt_free(hostname);
+  }
 
 #ifdef BENCH_THIS_CODE
   xbt_os_cputimer_start(timer);
@@ -326,13 +314,9 @@ static void create_jobs(xbt_matrix_t A, xbt_matrix_t B, node_job_t *jobs)
 
     /* Assign a sub matrix of A and B to the job */
     jobs[node]->A =
-      xbt_matrix_new_sub(A, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
-                         NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col,
-                         NULL);
+      xbt_matrix_new_sub(A, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col, NULL);
     jobs[node]->B =
-      xbt_matrix_new_sub(B, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE,
-                         NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col,
-                         NULL);
+      xbt_matrix_new_sub(B, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE, NODE_MATRIX_SIZE * row, NODE_MATRIX_SIZE * col, NULL);
 
     if (++col >= GRID_SIZE){
       col = 0;
diff --git a/examples/msg/pmm/pmm.tesh b/examples/msg/pmm/pmm.tesh
index 44c61f4..bb6abf7 100644 (file)
@@ -3,7 +3,7 @@
 p Testing the Parallel Matrix Multiplication 
 
 ! timeout 120
-$ ./pmm/msg_pmm --log=msg_pmm.thres:verbose --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/pmm/pmm_9_deploy.xml
+$ ./pmm/msg_pmm --log=msg_pmm.thres:verbose --log=no_loc ${srcdir:=.}/../platforms/cluster.xml
 > [node-0.acme.org:node:(1) 0.000000] [msg_pmm/VERBOSE] Create job 0
 > [node-0.acme.org:node:(1) 0.000000] [msg_pmm/VERBOSE] Create job 1
 > [node-0.acme.org:node:(1) 0.000000] [msg_pmm/VERBOSE] Create job 2
diff --git a/examples/msg/pmm/pmm_9_deploy.xml b/examples/msg/pmm/pmm_9_deploy.xml
deleted file mode 100644 (file)
index 140d9f5..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version='1.0'?>
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
-<platform version="4">
-  <process host="node-0.acme.org" function="node"><argument value="0"/></process>
-  <process host="node-1.acme.org" function="node"><argument value="1"/></process>
-  <process host="node-2.acme.org" function="node"><argument value="2"/></process>
-  <process host="node-3.acme.org" function="node"><argument value="3"/></process>
-  <process host="node-4.acme.org" function="node"><argument value="4"/></process>
-  <process host="node-5.acme.org" function="node"><argument value="5"/></process>
-  <process host="node-6.acme.org" function="node"><argument value="6"/></process>
-  <process host="node-7.acme.org" function="node"><argument value="7"/></process>
-  <process host="node-8.acme.org" function="node"><argument value="8"/></process>
-</platform>