/* construct the platform and deploy the application */
Msg.createEnvironment(args[0]);
- new cloud.migration.Test("host0","Test").start();
+ new cloud.migration.Test("PM0","Test").start();
Msg.run();
}
}
Host host1 = null;
try {
- host0 = Host.getByName("host0");
- host1 = Host.getByName("host1");
+ host0 = Host.getByName("PM0");
+ host1 = Host.getByName("PM1");
}catch (HostNotFoundException e) {
e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates.
}
Msg.info("Round trip of VM1 (load "+load1+"%)");
vm1.setLoad(load1);
- Msg.info(" - Launch migration from host 0 to host 1");
+ Msg.info(" - Launch migration from PM0 to PM1");
startTime = Msg.getClock();
vm1.migrate(host1);
endTime = Msg.getClock();
- Msg.info(" - End of Migration from host 0 to host 1 (duration:"+(endTime-startTime)+")");
- Msg.info(" - Launch migration from host 1 to host 0");
+ Msg.info(" - End of Migration from PM0 to PM1 (duration:"+(endTime-startTime)+")");
+ Msg.info(" - Launch migration from PM1 to PM0");
startTime = Msg.getClock();
vm1.migrate(host0);
endTime = Msg.getClock();
- Msg.info(" - End of Migration from host 1 to host 0 (duration:"+(endTime-startTime)+")");
+ Msg.info(" - End of Migration from PM1 to PM0 (duration:"+(endTime-startTime)+")");
Msg.info("");
Msg.info("");
Msg.info("Round trip of VM1 (load "+load2+"%)");
vm1.setLoad(load2);
- Msg.info(" - Launch migration from host 0 to host 1");
+ Msg.info(" - Launch migration from PM0 to PM1");
startTime = Msg.getClock();
vm1.migrate(host1);
endTime = Msg.getClock();
- Msg.info(" - End of Migration from host 0 to host 1 (duration:"+(endTime-startTime)+")");
- Msg.info(" - Launch migration from host 1 to host 0");
+ Msg.info(" - End of Migration from PM0 to PM1 (duration:"+(endTime-startTime)+")");
+ Msg.info(" - Launch migration from PM1 to PM0");
startTime = Msg.getClock();
vm1.migrate(host0);
endTime = Msg.getClock();
- Msg.info(" - End of Migration from host 1 to host 0 (duration:"+(endTime-startTime)+")");
+ Msg.info(" - End of Migration from PM1 to PM0 (duration:"+(endTime-startTime)+")");
Main.setEndOfTest();
Msg.info("Forcefully destroy VMs");
// node or on the DST node.
public class TestHostOnOff extends Process{
- public static Host host0 = null;
- public static Host host1 = null;
- public static Host host2 = null;
+ protected Host host0 = null;
+ protected Host host1 = null;
+ protected Host host2 = null;
- TestHostOnOff(Host host, String name, String[] args) throws NativeException {
- super(host, name, args);
+ TestHostOnOff(String hostname, String name) throws HostNotFoundException, NativeException {
+ super(hostname, name);
}
public void main(String[] strings) throws MsgException {
/* get hosts 1 and 2*/
try {
- host0 = Host.getByName("host0");
- host1 = Host.getByName("host1");
- host1 = Host.getByName("host2");
+ host0 = Host.getByName("PM0");
+ host1 = Host.getByName("PM1");
+ host2 = Host.getByName("PM2");
}catch (HostNotFoundException e) {
e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates.
}
// Robustness on the SRC node
- //for (int i =0 ; i < 55000 ; i++)
- // testVMMigrate(host1, i);
+ for (int i =100 ; i < 55000 ; i+=100)
+ testVMMigrate(host1, i);
// Robustness on the DST node
- //for (int i =0 ; i < 55000 ; i++)
- // testVMMigrate(host2, i);
+ for (int i =0 ; i < 55000 ; i++)
+ testVMMigrate(host2, i);
/* End of Tests */
Msg.info("Nor more tests, Bye Bye !");
Main.setEndOfTest();
}
- public static void testVMMigrate (Host hostToKill, long killAt) throws MsgException {
+ public void testVMMigrate (Host hostToKill, long killAt) throws MsgException {
Msg.info("**** **** **** ***** ***** Test Migrate with host shutdown ***** ***** **** **** ****");
Msg.info("Turn on one host, assign a VM on this host, launch a process inside the VM, migrate the VM and "
+ "turn off either the SRC or DST");
String[] args = new String[3];
args[0] = "vm0";
- args[1] = "host1";
- args[2] = "host2";
- new Process(host1, "Migrate-" + new Random().nextDouble(), args) {
+ args[1] = "PM1";
+ args[2] = "PM2";
+ new Process(host1, "Migrate-" + killAt, args) {
public void main(String[] args) {
Host destHost = null;
Host sourceHost = null;
Msg.info("Migrate vm "+args[0]+" to node "+destHost.getName());
VM.getVMByName(args[0]).migrate(destHost);
} catch (HostFailureException e) {
- e.printStackTrace();
Msg.info("Something occurs during the migration that cannot validate the operation");
+ e.printStackTrace();
}
}
}
// Wait killAt ms before killing thehost
Process.sleep(killAt);
+ Msg.info("The migration process should be stopped and we should catch an exception");
hostToKill.off();
- Process.sleep(5);
- Msg.info("The migration process should be stopped and we should catch an exception\n");
- Process.sleep(5);
Process.sleep(50000);
Msg.info("Destroy VMs");
Process.sleep(20000);
}
- public static void testVMShutdownDestroy () throws HostFailureException {
+ public void testVMShutdownDestroy () throws HostFailureException {
Msg.info("**** **** **** ***** ***** Test shutdown a VM ***** ***** **** **** ****");
Msg.info("Turn on host1, assign a VM on host1, launch a process inside the VM, and turn off the vm, " +
"and check whether you can reallocate the same VM");
#! tesh
-$ java -classpath ${classpath:=.} cloud/migration/Main ${srcdir:=.}/../platforms/two_hosts_platform.xml
+$ java -classpath ${classpath:=.} cloud/migration/Main ${srcdir:=.}/../platforms/three_multicore_hosts.xml
> [0.000000] [jmsg/INFO] Using regular java threads.
-> [host0:Test:(1) 0.000000] [jmsg/INFO] This example evaluates the migration time of a VM in presence of collocated VMs on the source and the dest nodes
-> [host0:Test:(1) 0.000000] [jmsg/INFO] The migrated VM has a memory intensity rate of 70% of the network BW and a cpu load of 90% " (see cloudcom 2013 paper "Adding a Live Migration Model Into SimGrid" for further information)
-> [host0:Test:(1) 0.000000] [jmsg/INFO] Load of collocated VMs fluctuate between 0 and 90% in order to create a starvation issue and see whether it impacts or not the migration time
-> [host0:Test:(1) 0.000000] [jmsg/INFO] Round trip of VM1 (load 90%)
-> [host0:Test:(1) 0.000000] [jmsg/INFO] - Launch migration from host 0 to host 1
-> [host0:Test:(1) 0.000000] [jmsg/INFO] Start migration of VM vm0 to host1
-> [host0:Test:(1) 0.000000] [jmsg/INFO] currentLoad:90/ramSize:2048/dpIntensity:70/remaining:8.10E+11
-> [host0:Test:(1) 35.084170] [jmsg/INFO] End of migration of VM vm0 to node host1
-> [host0:Test:(1) 35.084170] [jmsg/INFO] - End of Migration from host 0 to host 1 (duration:35.0841702956701)
-> [host0:Test:(1) 35.084170] [jmsg/INFO] - Launch migration from host 1 to host 0
-> [host0:Test:(1) 35.084170] [jmsg/INFO] Start migration of VM vm0 to host0
-> [host0:Test:(1) 35.084170] [jmsg/INFO] currentLoad:90/ramSize:2048/dpIntensity:70/remaining:6.22E+11
-> [host0:Test:(1) 67.551019] [jmsg/INFO] End of migration of VM vm0 to node host0
-> [host0:Test:(1) 67.551019] [jmsg/INFO] - End of Migration from host 1 to host 0 (duration:32.46684874546391)
-> [host0:Test:(1) 67.551019] [jmsg/INFO]
-> [host0:Test:(1) 67.551019] [jmsg/INFO]
-> [host0:Test:(1) 67.551019] [jmsg/INFO] Round trip of VM1 (load 80%)
-> [host0:Test:(1) 67.551019] [jmsg/INFO] - Launch migration from host 0 to host 1
-> [host0:Test:(1) 67.551019] [jmsg/INFO] Start migration of VM vm0 to host1
-> [host0:Test:(1) 67.551019] [jmsg/INFO] currentLoad:80/ramSize:2048/dpIntensity:70/remaining:4.64E+11
-> [host0:Test:(1) 102.635189] [jmsg/INFO] End of migration of VM vm0 to node host1
-> [host0:Test:(1) 102.635189] [jmsg/INFO] - End of Migration from host 0 to host 1 (duration:35.08417029567006)
-> [host0:Test:(1) 102.635189] [jmsg/INFO] - Launch migration from host 1 to host 0
-> [host0:Test:(1) 102.635189] [jmsg/INFO] Start migration of VM vm0 to host0
-> [host0:Test:(1) 102.635189] [jmsg/INFO] currentLoad:80/ramSize:2048/dpIntensity:70/remaining:2.77E+11
-> [host0:Test:(1) 135.102038] [jmsg/INFO] End of migration of VM vm0 to node host0
-> [host0:Test:(1) 135.102038] [jmsg/INFO] - End of Migration from host 1 to host 0 (duration:32.46684874546395)
-> [host0:Test:(1) 135.102038] [jmsg/INFO] Forcefully destroy VMs
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] This example evaluates the migration time of a VM in presence of collocated VMs on the source and the dest nodes
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] The migrated VM has a memory intensity rate of 70% of the network BW and a cpu load of 90% " (see cloudcom 2013 paper "Adding a Live Migration Model Into SimGrid" for further information)
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] Load of collocated VMs fluctuate between 0 and 90% in order to create a starvation issue and see whether it impacts or not the migration time
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] Round trip of VM1 (load 90%)
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] - Launch migration from PM0 to PM1
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] Start migration of VM vm0 to PM1
+> [PM0:Test:(1) 0.000000] [jmsg/INFO] currentLoad:90/ramSize:2048/dpIntensity:70/remaining:8.10E+11
+> [PM0:Test:(1) 35.084170] [jmsg/INFO] End of migration of VM vm0 to node PM1
+> [PM0:Test:(1) 35.084170] [jmsg/INFO] - End of Migration from PM0 to PM1 (duration:35.0841702956701)
+> [PM0:Test:(1) 35.084170] [jmsg/INFO] - Launch migration from PM1 to PM0
+> [PM0:Test:(1) 35.084170] [jmsg/INFO] Start migration of VM vm0 to PM0
+> [PM0:Test:(1) 35.084170] [jmsg/INFO] currentLoad:90/ramSize:2048/dpIntensity:70/remaining:6.22E+11
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] End of migration of VM vm0 to node PM0
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] - End of Migration from PM1 to PM0 (duration:32.46684874546391)
+> [PM0:Test:(1) 67.551019] [jmsg/INFO]
+> [PM0:Test:(1) 67.551019] [jmsg/INFO]
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] Round trip of VM1 (load 80%)
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] - Launch migration from PM0 to PM1
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] Start migration of VM vm0 to PM1
+> [PM0:Test:(1) 67.551019] [jmsg/INFO] currentLoad:80/ramSize:2048/dpIntensity:70/remaining:4.64E+11
+> [PM0:Test:(1) 102.635189] [jmsg/INFO] End of migration of VM vm0 to node PM1
+> [PM0:Test:(1) 102.635189] [jmsg/INFO] - End of Migration from PM0 to PM1 (duration:35.08417029567006)
+> [PM0:Test:(1) 102.635189] [jmsg/INFO] - Launch migration from PM1 to PM0
+> [PM0:Test:(1) 102.635189] [jmsg/INFO] Start migration of VM vm0 to PM0
+> [PM0:Test:(1) 102.635189] [jmsg/INFO] currentLoad:80/ramSize:2048/dpIntensity:70/remaining:2.77E+11
+> [PM0:Test:(1) 135.102038] [jmsg/INFO] End of migration of VM vm0 to node PM0
+> [PM0:Test:(1) 135.102038] [jmsg/INFO] - End of Migration from PM1 to PM0 (duration:32.46684874546395)
+> [PM0:Test:(1) 135.102038] [jmsg/INFO] Forcefully destroy VMs
> [135.102038] [jmsg/INFO] MSG_main finished; Cleaning up the simulation...
t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
- MSG_process_create("worker1", worker_main, t1.task, pm1);
- MSG_process_create("worker2", worker_main, t2.task, pm1);
- MSG_process_create("worker3", worker_main, t3.task, pm1);
- MSG_process_create("worker4", worker_main, t4.task, pm1);
+ MSG_process_create("worker1", worker_main, t1.task, pm2);
+ MSG_process_create("worker2", worker_main, t2.task, pm2);
+ MSG_process_create("worker3", worker_main, t3.task, pm2);
+ MSG_process_create("worker4", worker_main, t4.task, pm2);
- XBT_INFO("## 1. start 4 tasks on PM1 (2 cores)");
+ XBT_INFO("## 1. start 4 tasks on PM2 (2 cores)");
task_data_init_clock(&t1);
task_data_init_clock(&t2);
task_data_init_clock(&t3);
task_data_get_clock(&t4);
XBT_INFO("## 2. pin all tasks to CPU0");
- MSG_task_set_affinity(t1.task, pm1, 0x01);
- MSG_task_set_affinity(t2.task, pm1, 0x01);
- MSG_task_set_affinity(t3.task, pm1, 0x01);
- MSG_task_set_affinity(t4.task, pm1, 0x01);
+ MSG_task_set_affinity(t1.task, pm2, 0x01);
+ MSG_task_set_affinity(t2.task, pm2, 0x01);
+ MSG_task_set_affinity(t3.task, pm2, 0x01);
+ MSG_task_set_affinity(t4.task, pm2, 0x01);
MSG_process_sleep(10);
task_data_get_clock(&t1);
task_data_get_clock(&t4);
XBT_INFO("## 3. clear the affinity of task4");
- MSG_task_set_affinity(t4.task, pm1, 0);
+ MSG_task_set_affinity(t4.task, pm2, 0);
MSG_process_sleep(10);
task_data_get_clock(&t1);
task_data_get_clock(&t4);
XBT_INFO("## 4. clear the affinity of task3");
- MSG_task_set_affinity(t3.task, pm1, 0);
+ MSG_task_set_affinity(t3.task, pm2, 0);
MSG_process_sleep(10);
task_data_get_clock(&t1);
task_data_get_clock(&t4);
XBT_INFO("## 5. clear the affinity of task2");
- MSG_task_set_affinity(t2.task, pm1, 0);
+ MSG_task_set_affinity(t2.task, pm2, 0);
MSG_process_sleep(10);
task_data_get_clock(&t1);
XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
MSG_task_set_affinity(t1.task, pm0, 0);
MSG_task_set_affinity(t2.task, pm0, 0);
- MSG_task_set_affinity(t3.task, pm2, 0);
- MSG_task_set_affinity(t4.task, pm2, 0);
+ MSG_task_set_affinity(t3.task, pm1, 0);
+ MSG_task_set_affinity(t4.task, pm1, 0);
MSG_process_sleep(10);
task_data_get_clock(&t1);
msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
xbt_dynar_free(&hosts_dynar);
- /* set up VMs on PM2 (4 cores) */
- msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
- msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
- msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
+ /* set up VMs on PM1 (4 cores) */
+ msg_vm_t vm0 = MSG_vm_create_core(pm1, "VM0");
+ msg_vm_t vm1 = MSG_vm_create_core(pm1, "VM1");
+ msg_vm_t vm2 = MSG_vm_create_core(pm1, "VM2");
+ msg_vm_t vm3 = MSG_vm_create_core(pm1, "VM3");
s_vm_params_t params;
memset(&params, 0, sizeof(params));
MSG_process_create("worker3", worker_main, t3.task, vm3);
/* start experiments */
- XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
+ XBT_INFO("## 1. start 4 VMs on PM1 (4 cores)");
task_data_init_clock(&t0);
task_data_init_clock(&t1);
task_data_init_clock(&t2);
task_data_get_clock(&t2);
task_data_get_clock(&t3);
- XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
- MSG_vm_set_affinity(vm0, pm2, 0x01);
- MSG_vm_set_affinity(vm1, pm2, 0x01);
- MSG_vm_set_affinity(vm2, pm2, 0x01);
- MSG_vm_set_affinity(vm3, pm2, 0x01);
+ XBT_INFO("## 2. pin all VMs to CPU0 of PM1");
+ MSG_vm_set_affinity(vm0, pm1, 0x01);
+ MSG_vm_set_affinity(vm1, pm1, 0x01);
+ MSG_vm_set_affinity(vm2, pm1, 0x01);
+ MSG_vm_set_affinity(vm3, pm1, 0x01);
MSG_process_sleep(10);
task_data_get_clock(&t0);
task_data_get_clock(&t2);
task_data_get_clock(&t3);
- XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect at now)");
+ XBT_INFO("## 3. pin all VMs to CPU0 of PM2 (no effect at now)");
- /* Because VMs are on PM2, the below operations do not effect computation now. */
+ /* Because VMs are on PM1, the below operations do not affect computation now. */
- MSG_vm_set_affinity(vm0, pm1, 0x01);
- MSG_vm_set_affinity(vm1, pm1, 0x01);
- MSG_vm_set_affinity(vm2, pm1, 0x01);
- MSG_vm_set_affinity(vm3, pm1, 0x01);
+ MSG_vm_set_affinity(vm0, pm2, 0x01);
+ MSG_vm_set_affinity(vm1, pm2, 0x01);
+ MSG_vm_set_affinity(vm2, pm2, 0x01);
+ MSG_vm_set_affinity(vm3, pm2, 0x01);
MSG_process_sleep(10);
task_data_get_clock(&t0);
task_data_get_clock(&t2);
task_data_get_clock(&t3);
- XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
- MSG_vm_set_affinity(vm0, pm2, 0x00);
- MSG_vm_set_affinity(vm2, pm2, 0x02);
- MSG_vm_set_affinity(vm3, pm2, 0x02);
+ XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM1");
+ MSG_vm_set_affinity(vm0, pm1, 0x00);
+ MSG_vm_set_affinity(vm2, pm1, 0x02);
+ MSG_vm_set_affinity(vm3, pm1, 0x02);
MSG_process_sleep(10);
task_data_get_clock(&t0);
task_data_get_clock(&t2);
task_data_get_clock(&t3);
- XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
- MSG_vm_migrate(vm0, pm1);
- MSG_vm_migrate(vm1, pm1);
- MSG_vm_migrate(vm2, pm1);
- MSG_vm_migrate(vm3, pm1);
+ XBT_INFO("## 6. migrate all VMs to PM2 (2 CPU cores, with affinity settings)");
+ MSG_vm_migrate(vm0, pm2);
+ MSG_vm_migrate(vm1, pm2);
+ MSG_vm_migrate(vm2, pm2);
+ MSG_vm_migrate(vm3, pm2);
MSG_process_sleep(10);
task_data_get_clock(&t0);
- XBT_INFO("## 7. clear affinity settings on PM1");
+ XBT_INFO("## 7. clear affinity settings on PM2");
- MSG_vm_set_affinity(vm0, pm1, 0);
- MSG_vm_set_affinity(vm1, pm1, 0);
- MSG_vm_set_affinity(vm2, pm1, 0);
- MSG_vm_set_affinity(vm3, pm1, 0);
+ MSG_vm_set_affinity(vm0, pm2, 0);
+ MSG_vm_set_affinity(vm1, pm2, 0);
+ MSG_vm_set_affinity(vm2, pm2, 0);
+ MSG_vm_set_affinity(vm3, pm2, 0);
MSG_process_sleep(10);
task_data_get_clock(&t0);
#! ./tesh
$ $SG_TEST_EXENV ${bindir:=.}/cloud-multicore$EXEEXT --log=no_loc ${srcdir:=.}/three_multicore_hosts.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:maestro@) PM0: 1 core(s), 100000000.000000 flops/s per each
-> [ 0.000000] (0:maestro@) PM1: 2 core(s), 100000000.000000 flops/s per each
-> [ 0.000000] (0:maestro@) PM2: 4 core(s), 100000000.000000 flops/s per each
+> [ 0.000000] (0:maestro@) PM0: 4 core(s), 8095000000.000001 flops/s per each
+> [ 0.000000] (0:maestro@) PM1: 4 core(s), 8095000000.000001 flops/s per each
+> [ 0.000000] (0:maestro@) PM2: 2 core(s), 8095000000.000001 flops/s per each
> [ 0.000000] (1:master@PM0) === Test PM (set affinity) ===
-> [ 0.000000] (1:master@PM0) ## 1. start 4 tasks on PM1 (2 cores)
-> [ 10.000000] (1:master@PM0) Task1: 50000000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task2: 50000000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task3: 50000000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task4: 50000000.000000 fops/s
+> [ 0.000000] (1:master@PM0) ## 1. start 4 tasks on PM2 (2 cores)
+> [ 10.000000] (1:master@PM0) Task1: 4047500000.000000 fops/s
+> [ 10.000000] (1:master@PM0) Task2: 4047500000.000000 fops/s
+> [ 10.000000] (1:master@PM0) Task3: 4047500000.000000 fops/s
+> [ 10.000000] (1:master@PM0) Task4: 4047500000.000000 fops/s
> [ 10.000000] (1:master@PM0) ## 2. pin all tasks to CPU0
-> [ 10.000000] (1:master@PM0) set affinity(0x0001@PM1) for Task1
+> [ 10.000000] (1:master@PM0) set affinity(0x0001@PM2) for Task1
<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
<platform version="4">
<AS id="siteA" routing="Full">
- <host id="PM0" speed="100Mf" core="1"/>
- <host id="PM1" speed="100Mf" core="2"/>
- <host id="PM2" speed="100Mf" core="4"/>
+ <host id="PM0" speed="8.095Gf" core="4"/>
+ <host id="PM1" speed="8.095Gf" core="4"/>
+ <host id="PM2" speed="8.095Gf" core="2"/>
- <link id="link1" bandwidth="100Mbps" latency="10ms" />
+ <link id="link1" bandwidth="125MBps" latency="100us" />
<route src="PM0" dst="PM1"><link_ctn id="link1"/></route>
<route src="PM0" dst="PM2"><link_ctn id="link1"/></route>
+++ /dev/null
-<?xml version='1.0'?>
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
-<platform version="4">
- <AS id="AS0" routing="Full">
- <host id="host0" core="4" speed="8.095Gf"/>
- <host id="host1" core="4" speed="8.095Gf"/>
- <link id="link1" bandwidth="125MBps" latency="100us"/>
- <route src="host0" dst="host1"><link_ctn id="link1"/></route>
- </AS>
-</platform>
int main(int argc, char **argv) {
simgrid::s4u::Engine *e = new simgrid::s4u::Engine(&argc,argv);
- e->loadPlatform("../../platforms/two_hosts_platform.xml");
- new simgrid::s4u::Actor("worker", simgrid::s4u::Host::by_name("host0"), Worker());
- new simgrid::s4u::Actor("master", simgrid::s4u::Host::by_name("host1"), 0, Master());
+ e->loadPlatform("../../platforms/two_hosts.xml");
+ new simgrid::s4u::Actor("worker", simgrid::s4u::Host::by_name("Tremblay"), Worker());
+ new simgrid::s4u::Actor("master", simgrid::s4u::Host::by_name("Jupiter"), 0, Master());
e->run();
return 0;
}
#! ./tesh
$ $SG_TEST_EXENV ${bindir:=.}/s4u_basic
-> [host0:worker:(0) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to serve
-> [host1:master:(0) 0.000000] [s4u_test/INFO] Hello s4u, I have something to send
-> [host0:worker:(0) 0.001301] [s4u_test/INFO] I received 'GaBuZoMeu'
-> [host0:worker:(0) 0.001301] [s4u_test/INFO] I'm done. See you.
-> [host1:master:(0) 0.001301] [s4u_test/INFO] I'm done. See you.
+> [Tremblay:worker:(0) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to serve
+> [Jupiter:master:(0) 0.000000] [s4u_test/INFO] Hello s4u, I have something to send
+> [Tremblay:worker:(0) 0.001301] [s4u_test/INFO] I received 'GaBuZoMeu'
+> [Tremblay:worker:(0) 0.001301] [s4u_test/INFO] I'm done. See you.
+> [Jupiter:master:(0) 0.001301] [s4u_test/INFO] I'm done. See you.
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- //Tests:
- //A: 0(isend -> wait) with 1(recv)
- int A = 1;
- //B: 0(send) with 1(irecv -> wait)
- int B = 1;
- //C: 0(N * isend -> N * wait) with 1(N * recv)
- int C = 1;
- //D: 0(N * isend -> N * waitany) with 1(N * recv)
- int D = 1;
- //E: 0(N*send) with 1(N*irecv, N*wait)
- int E = 1;
- //F: 0(N*send) with 1(N*irecv, N*waitany)
- int F = 1;
- //G: 0(N* isend -> waitall) with 1(N*recv)
- int G = 1;
- //H: 0(N*send) with 1(N*irecv, waitall)
- int H = 1;
- //I: 0(2*N*send, 2*N*Irecv, Waitall) with
- // 1(N*irecv, waitall, N*isend, N*waitany) with
- // 2(N*irecv, N*waitany, N*isend, waitall)
- int I = 1;
- //J: 0(N*isend, N*test, N*wait) with (N*irecv, N*test, N*wait)
- int J = 1;
-
+ /** Tests:
+ * A: 0(isend -> wait) with 1(recv)
+ * B: 0(send) with 1(irecv -> wait)
+ * C: 0(N * isend -> N * wait) with 1(N * recv)
+ * D: 0(N * isend -> N * waitany) with 1(N * recv)
+ * E: 0(N*send) with 1(N*irecv, N*wait)
+ * F: 0(N*send) with 1(N*irecv, N*waitany)
+ * G: 0(N* isend -> waitall) with 1(N*recv)
+ * H: 0(N*send) with 1(N*irecv, waitall)
+ * I: 0(2*N*send, 2*N*Irecv, Waitall) with
+ * 1(N*irecv, waitall, N*isend, N*waitany) with
+ * 2(N*irecv, N*waitany, N*isend, waitall)
+ * J: 0(N*isend, N*test, N*wait) with (N*irecv, N*test, N*wait)
+ */
int N = 13;
int tag = 12345;
MPI_Request req[2 * N];
MPI_Status sta[2 * N];
int *r = (int *) malloc(sizeof(int) * DATATOSENT);
- if (A) {
- TRACE_smpi_set_category("A");
- MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &request);
- MPI_Wait(&request, &status);
- }
+
+ /** Test A */
+ TRACE_smpi_set_category("A");
+ MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &request);
+ MPI_Wait(&request, &status);
MPI_Barrier(MPI_COMM_WORLD);
- if (B) {
- TRACE_smpi_set_category("B");
- MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
+ /** Test B */
+ TRACE_smpi_set_category("B");
+ MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /** Test C */
+ TRACE_smpi_set_category("C");
+ for (i = 0; i < N; i++) {
+ MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ MPI_Wait(&req[i], &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (C) {
- TRACE_smpi_set_category("C");
- for (i = 0; i < N; i++) {
- MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- MPI_Wait(&req[i], &sta[i]);
- }
+ TRACE_smpi_set_category("D");
+ for (i = 0; i < N; i++) {
+ MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ int completed;
+ MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (D) {
- TRACE_smpi_set_category("D");
- for (i = 0; i < N; i++) {
- MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- int completed;
- MPI_Waitany(N, req, &completed, sta);
- }
+ TRACE_smpi_set_category("E");
+ for (i = 0; i < N; i++) {
+ MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (E) {
- TRACE_smpi_set_category("E");
- for (i = 0; i < N; i++) {
- MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
- }
+ TRACE_smpi_set_category("F");
+ for (i = 0; i < N; i++) {
+ MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (F) {
- TRACE_smpi_set_category("F");
- for (i = 0; i < N; i++) {
- MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
- }
+ TRACE_smpi_set_category("G");
+ for (i = 0; i < N; i++) {
+ MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
}
+ MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
- if (G) {
- TRACE_smpi_set_category("G");
- for (i = 0; i < N; i++) {
- MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
- }
- MPI_Waitall(N, req, sta);
+ TRACE_smpi_set_category("H");
+ for (i = 0; i < N; i++) {
+ MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (H) {
- TRACE_smpi_set_category("H");
- for (i = 0; i < N; i++) {
+ TRACE_smpi_set_category("I");
+ for (i = 0; i < 2 * N; i++) {
+ if (i < N) {
+ MPI_Send(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD);
+ } else {
MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
}
MPI_Barrier(MPI_COMM_WORLD);
-
- if (I) {
- TRACE_smpi_set_category("I");
- for (i = 0; i < 2 * N; i++) {
- if (i < N) {
- MPI_Send(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD);
- } else {
- MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
- }
- }
- MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < 2 * N; i++) {
- if (i < N) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD,
- &req[i]);
- } else {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD,
- &req[i]);
- }
+ for (i = 0; i < 2 * N; i++) {
+ if (i < N) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
+ } else {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD, &req[i]);
}
- MPI_Waitall(2 * N, req, sta);
}
+ MPI_Waitall(2 * N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
- if (J) {
- TRACE_smpi_set_category("J");
- for (i = 0; i < N; i++) {
- MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- int flag;
- MPI_Test(&req[i], &flag, &sta[i]);
- }
- for (i = 0; i < N; i++) {
- MPI_Wait(&req[i], &sta[i]);
- }
+ TRACE_smpi_set_category("J");
+ for (i = 0; i < N; i++) {
+ MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ int flag;
+ MPI_Test(&req[i], &flag, &sta[i]);
+ }
+ for (i = 0; i < N; i++) {
+ MPI_Wait(&req[i], &sta[i]);
}
free(r);
/////////////////////////////////////////
MPI_Status sta[N];
int *r = (int *) malloc(sizeof(int) * DATATOSENT);
- if (A) {
- TRACE_smpi_set_category("A");
- MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
- }
+ TRACE_smpi_set_category("A");
+ MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
MPI_Barrier(MPI_COMM_WORLD);
- if (B) {
- TRACE_smpi_set_category("B");
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &request);
- MPI_Wait(&request, &status);
- }
+ TRACE_smpi_set_category("B");
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &request);
+ MPI_Wait(&request, &status);
MPI_Barrier(MPI_COMM_WORLD);
- if (C) {
- TRACE_smpi_set_category("C");
- for (i = 0; i < N; i++) {
- MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
- }
+ TRACE_smpi_set_category("C");
+ for (i = 0; i < N; i++) {
+ MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (D) {
- TRACE_smpi_set_category("D");
- for (i = 0; i < N; i++) {
- MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
- }
+ TRACE_smpi_set_category("D");
+ for (i = 0; i < N; i++) {
+ MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (E) {
- TRACE_smpi_set_category("E");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- MPI_Wait(&req[i], &sta[i]);
- }
+ TRACE_smpi_set_category("E");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ MPI_Wait(&req[i], &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (F) {
- TRACE_smpi_set_category("F");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- int completed;
- MPI_Waitany(N, req, &completed, sta);
- }
+ TRACE_smpi_set_category("F");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ int completed;
+ MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (G) {
- TRACE_smpi_set_category("G");
- for (i = 0; i < N; i++) {
- MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
- }
+ TRACE_smpi_set_category("G");
+ for (i = 0; i < N; i++) {
+ MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (H) {
- TRACE_smpi_set_category("H");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- MPI_Waitall(N, req, sta);
+ TRACE_smpi_set_category("H");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
+ MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
- if (I) {
- TRACE_smpi_set_category("I");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- MPI_Waitall(N, req, sta);
+ TRACE_smpi_set_category("I");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ MPI_Waitall(N, req, sta);
- MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < N; i++) {
- MPI_Isend(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- MPI_Waitall(N, req, sta);
-// for (i = 0; i < N; i++){
-// MPI_Wait (&req[i], &sta[i]);
-// }
+ MPI_Barrier(MPI_COMM_WORLD);
+ for (i = 0; i < N; i++) {
+ MPI_Isend(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
+ MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
- if (J) {
- TRACE_smpi_set_category("J");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- int flag;
- MPI_Test(&req[i], &flag, &sta[i]);
- }
- for (i = 0; i < N; i++) {
- MPI_Wait(&req[i], &sta[i]);
- }
+ TRACE_smpi_set_category("J");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ int flag;
+ MPI_Test(&req[i], &flag, &sta[i]);
+ }
+ for (i = 0; i < N; i++) {
+ MPI_Wait(&req[i], &sta[i]);
}
free(r);
/////////////////////////////////////////
////////////////// RANK 2
///////////////////////////////////
} else if (rank == 2) {
-// MPI_Request request;
-// MPI_Status status;
MPI_Request req[N];
MPI_Status sta[N];
int *r = (int *) malloc(sizeof(int) * DATATOSENT);
- if (A) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (B) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (C) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (D) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (E) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (F) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (G) {
- }
MPI_Barrier(MPI_COMM_WORLD);
- if (H) {
+ MPI_Barrier(MPI_COMM_WORLD);
+ TRACE_smpi_set_category("I");
+ for (i = 0; i < N; i++) {
+ MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
+ }
+ for (i = 0; i < N; i++) {
+ int completed;
+ MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (I) {
- TRACE_smpi_set_category("I");
- for (i = 0; i < N; i++) {
- MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
- }
- for (i = 0; i < N; i++) {
- int completed;
- MPI_Waitany(N, req, &completed, sta);
- }
- MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < N; i++) {
- MPI_Send(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD);
- }
+ for (i = 0; i < N; i++) {
+ MPI_Send(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
- if (J) {
- }
free(r);
}
MPI_Barrier(MPI_COMM_WORLD);
XBT_PUBLIC(void) simcall_run_kernel(std::function<void()> const& code);
+template<class F> inline
+void simcall_run_kernel(F& f)
+{
+ simcall_run_kernel(std::function<void()>(std::ref(f)));
+}
+
namespace simgrid {
namespace simix {
if (SIMIX_is_maestro())
return std::forward<F>(code)();
- // If we are in the application, pass the code to the maestro which is
+ // If we are in the application, pass the code to the maestro which
// executes it for us and reports the result. We use a std::future which
// conveniently handles the success/failure value for us.
typedef typename std::result_of<F()>::type R;
- std::promise<R> promise;
+ simgrid::xbt::Result<R> result;
simcall_run_kernel([&]{
xbt_assert(SIMIX_is_maestro(), "Not in maestro");
- simgrid::xbt::fulfillPromise(promise, std::forward<F>(code));
+ simgrid::xbt::fulfillPromise(result, std::forward<F>(code));
});
- return promise.get_future().get();
+ return result.get();
}
class Context;
#ifndef XBT_FUTURE_HPP
#define XBT_FUTURE_HPP
-#include <future>
+#include <cstddef>
+#include <functional>
+
#include <utility>
#include <exception>
namespace simgrid {
namespace xbt {
-/** Fulfill a promise by executing a given code */
/** A value or an exception
 *
 * The API is similar to the one of future and promise: the holder is either
 * empty (invalid), holds a value of type T, or holds a std::exception_ptr.
 * The payload lives in a union, so construction/destruction of the active
 * member is managed manually with placement-new and explicit destructor calls.
 **/
template<class T>
class Result {
  enum class ResultStatus {
    invalid,
    value,
    exception,
  };
public:
  Result() {}
  ~Result() { this->reset(); }

  // Copy (if T is copyable) and move:
  Result(Result const& that)
  {
    // status_ is already 'invalid' (NSDMI), so operator= may safely reset().
    (*this) = that;
  }
  Result& operator=(Result const& that)
  {
    this->reset();
    switch (that.status_) {
      case ResultStatus::invalid:
        break;
      case ResultStatus::value:
        // Activate the union's value member with a copy of the source value.
        new (&value_) T(that.value_);
        break;
      case ResultStatus::exception:
        // The union member is a std::exception_ptr, not a T.
        new (&exception_) std::exception_ptr(that.exception_);
        break;
    }
    // Mirror the source's state; without this the target would stay invalid.
    status_ = that.status_;
    return *this;
  }
  Result(Result&& that)
  {
    *this = std::move(that);
  }
  Result& operator=(Result&& that)
  {
    this->reset();
    switch (that.status_) {
      case ResultStatus::invalid:
        break;
      case ResultStatus::value:
        new (&value_) T(std::move(that.value_));
        that.value_.~T();
        break;
      case ResultStatus::exception:
        new (&exception_) std::exception_ptr(std::move(that.exception_));
        that.exception_.~exception_ptr();
        break;
    }
    // Take over the source's state, then leave the source empty.
    status_ = that.status_;
    that.status_ = ResultStatus::invalid;
    return *this;
  }

  /** Whether a value or an exception is stored */
  bool is_valid() const
  {
    return status_ != ResultStatus::invalid;
  }
  /** Destroy the stored payload (if any) and go back to the invalid state */
  void reset()
  {
    switch (status_) {
      case ResultStatus::invalid:
        break;
      case ResultStatus::value:
        value_.~T();
        break;
      case ResultStatus::exception:
        exception_.~exception_ptr();
        break;
    }
    status_ = ResultStatus::invalid;
  }
  /** Store an exception (replacing any previous payload) */
  void set_exception(std::exception_ptr e)
  {
    this->reset();
    new (&exception_) std::exception_ptr(std::move(e));
    status_ = ResultStatus::exception;
  }
  /** Store a value by move (replacing any previous payload) */
  void set_value(T&& value)
  {
    this->reset();
    new (&value_) T(std::move(value));
    status_ = ResultStatus::value;
  }
  /** Store a value by copy (replacing any previous payload) */
  void set_value(T const& value)
  {
    this->reset();
    new (&value_) T(value);
    status_ = ResultStatus::value;
  }

  /** Extract the value (or rethrow the stored exception)
   *
   * After this the result is invalid.
   * @throws std::logic_error when the result is invalid
   **/
  T get()
  {
    switch (status_) {
      case ResultStatus::invalid:
      default:
        throw std::logic_error("Invalid result");
      case ResultStatus::value: {
        T value = std::move(value_);
        value_.~T();
        status_ = ResultStatus::invalid;
        return std::move(value);
      }
      case ResultStatus::exception: {
        std::exception_ptr exception = std::move(exception_);
        exception_.~exception_ptr();
        status_ = ResultStatus::invalid;
        std::rethrow_exception(std::move(exception));
        break;
      }
    }
  }
private:
  ResultStatus status_ = ResultStatus::invalid;
  union {
    T value_;
    std::exception_ptr exception_;
  };
};
+
+template<>
+class Result<void> : public Result<std::nullptr_t>
+{
+public:
+ void set_value()
+ {
+ Result<std::nullptr_t>::set_value(nullptr);
+ }
+ void get()
+ {
+ Result<std::nullptr_t>::get();
+ }
+};
+
+template<class T>
+class Result<T&> : public Result<std::reference_wrapper<T>>
+{
+public:
+ void set_value(T& value)
+ {
+ Result<std::reference_wrapper<T>>::set_value(std::ref(value));
+ }
+ T& get()
+ {
+ return Result<std::reference_wrapper<T>>::get();
+ }
+};
+
+/** Execute some code and set a promise or result accordingly
+ *
+ * We might need this when working with generic code because
+ * the trivial implementation does not work with void (before C++1z).
+ *
+ * @param code What we want to do
+ * @param promise Where to want to store the result
+ */
template<class R, class F>
-void fulfillPromise(std::promise<R>& promise, F code)
+auto fulfillPromise(R& promise, F&& code)
+-> decltype(promise.set_value(code()))
{
try {
promise.set_value(code());
}
}
-/** Fulfill a promise by executing a given code
- *
- * This is a special version for `std::promise<void>` because the default
- * version does not compile in this case.
- */
-template<class F>
-void fulfillPromise(std::promise<void>& promise, F code)
+template<class P, class F>
+auto fulfillPromise(P& promise, F&& code)
+-> decltype(promise.set_value())
{
try {
(code)();
/**
* Starts listening for receiving a task from an asynchronous communication
* @param mailbox
+ * @return a Comm handler
*/
public static native Comm irecv(String mailbox);
/**
* Retrieves next task from the mailbox identified by the specified name
*
* @param mailbox
+ * @return a Task
*/
public static Task receive(String mailbox) throws TransferFailureException, HostFailureException, TimeoutException {
*
* @param mailbox
* @param timeout
+ * @return a Task
*/
public static Task receive(String mailbox, double timeout) throws TransferFailureException, HostFailureException, TimeoutException {
return receive(mailbox, timeout, null);
*
* @param mailbox
* @param host
+ * @return a Task
*/
public static Task receive(String mailbox, Host host) throws TransferFailureException, HostFailureException, TimeoutException {
* @param mailbox
* @param timeout
* @param host
+ * @return a Task
*/
- public native static Task receive(String mailbox, double timeout, Host host) throws TransferFailureException, HostFailureException, TimeoutException;
+ public static native Task receive(String mailbox, double timeout, Host host) throws TransferFailureException, HostFailureException, TimeoutException;
/**
* Starts listening for receiving a task from an asynchronous communication with a capped rate
* @param mailbox
+ * @return a Comm handler
*/
public static native Comm irecvBounded(String mailbox, double rate);
/**
* Retrieves next task from the mailbox identified by the specified name with a capped rate
*
* @param mailbox
+ * @return a Task
*/
public static Task receiveBounded(String mailbox, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
*
* @param mailbox
* @param timeout
+ * @return a Task
*/
public static Task receiveBounded(String mailbox, double timeout, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
return receiveBounded(mailbox, timeout, null, rate);
*
* @param mailbox
* @param host
+ * @return a Task
*/
public static Task receiveBounded(String mailbox, Host host, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
* @param mailbox
* @param timeout
* @param host
+ * @return a Task
*/
- public native static Task receiveBounded(String mailbox, double timeout, Host host, double rate) throws TransferFailureException, HostFailureException, TimeoutException;
+ public static native Task receiveBounded(String mailbox, double timeout, Host host, double rate) throws TransferFailureException, HostFailureException, TimeoutException;
/**
* Tests whether there is a pending communication on the mailbox identified by the specified alias, and who sent it
*/
- public native static int listenFrom(String mailbox);
+ public static native int listenFrom(String mailbox);
/**
* Listen whether there is a task waiting (either for a send or a recv) on the mailbox identified by the specified alias
*/
- public native static boolean listen(String mailbox);
+ public static native boolean listen(String mailbox);
/**
* Class initializer, to initialize various JNI stuff
* under the terms of the license (GNU LGPL) which comes with this package. */
package org.simgrid.msg;
-import java.util.Vector;
+import java.util.ArrayList;
public class VM extends Host{
// Please note that we are not declaring a new bind variable
// GetByName is inherited from the super class Host
- private static Vector<VM> vms= new Vector<>();
+ private static ArrayList<VM> vms= new ArrayList<>();
private Host currentHost;
/* Constructors / destructors */
* Do --help on any simgrid binary to see the list of currently existing configuration variables, and
* see Section @ref options.
*
- * Example:
- * SD_config("host/model","default");
+ * Example: SD_config("host/model","default");
*/
void SD_config(const char *key, const char *value){
xbt_assert(sd_global,"ERROR: Please call SD_init() before using SD_config()");
/* let's see which tasks are done */
xbt_dynar_foreach(all_existing_models, iter, model) {
while ((action = surf_model_extract_done_action_set(model))) {
- task = (SD_task_t) action->getData();
+ task = static_cast<SD_task_t>(action->getData());
task->start_time = task->surf_action->getStartTime();
task->finish_time = surf_get_clock();
task->surf_action = NULL;
/* the state has changed. Add it only if it's the first change */
- if (!xbt_dynar_member(sd_global->return_set, &task)) {
+ if (xbt_dynar_member(sd_global->return_set, &task) == 0) {
xbt_dynar_push(sd_global->return_set, &task);
}
XBT_DEBUG("Released a dependency on %s: %d remain(s). Became schedulable if %d=0",
SD_task_get_name(dst), dst->unsatisfied_dependencies, dst->is_not_ready);
- if (!(dst->unsatisfied_dependencies)) {
+ if (dst->unsatisfied_dependencies == 0) {
if (SD_task_get_state(dst) == SD_SCHEDULED)
SD_task_set_state(dst, SD_RUNNABLE);
else
SD_task_set_state(dst, SD_SCHEDULABLE);
}
- if (SD_task_get_state(dst) == SD_NOT_SCHEDULED && !(dst->is_not_ready)) {
+ if (SD_task_get_state(dst) == SD_NOT_SCHEDULED && dst->is_not_ready == 0) {
SD_task_set_state(dst, SD_SCHEDULABLE);
}
XBT_DEBUG("%s is a transfer, %s may be ready now if %d=0",
SD_task_get_name(dst), SD_task_get_name(comm_dst), comm_dst->is_not_ready);
- if (!(comm_dst->is_not_ready)) {
+ if (comm_dst->is_not_ready == 0) {
SD_task_set_state(comm_dst, SD_SCHEDULABLE);
}
}
/* let's see which tasks have just failed */
while ((action = surf_model_extract_failed_action_set(model))) {
- task = (SD_task_t) action->getData();
+ task = static_cast<SD_task_t>(action->getData());
task->start_time = task->surf_action->getStartTime();
task->finish_time = surf_get_clock();
XBT_VERB("Task '%s' failed", SD_task_get_name(task));
}
}
- if (!sd_global->watch_point_reached && how_long<0){
- if (!xbt_dynar_is_empty(sd_global->initial_task_set)) {
- XBT_WARN("Simulation is finished but %lu tasks are still not done",
- xbt_dynar_length(sd_global->initial_task_set));
- static const char* state_names[] =
- { "SD_NOT_SCHEDULED", "SD_SCHEDULABLE", "SD_SCHEDULED", "SD_RUNNABLE", "SD_RUNNING", "SD_DONE","SD_FAILED" };
- xbt_dynar_foreach(sd_global->initial_task_set, iter, task){
- XBT_WARN("%s is in %s state", SD_task_get_name(task), state_names[SD_task_get_state(task)]);
- }
+ if (!sd_global->watch_point_reached && how_long<0 &&
+ xbt_dynar_is_empty(sd_global->initial_task_set) == 0) {
+ XBT_WARN("Simulation is finished but %lu tasks are still not done",
+ xbt_dynar_length(sd_global->initial_task_set));
+ static const char* state_names[] =
+ { "SD_NOT_SCHEDULED", "SD_SCHEDULABLE", "SD_SCHEDULED", "SD_RUNNABLE", "SD_RUNNING", "SD_DONE","SD_FAILED" };
+ xbt_dynar_foreach(sd_global->initial_task_set, iter, task){
+ XBT_WARN("%s is in %s state", SD_task_get_name(task), state_names[SD_task_get_state(task)]);
}
}
}
/** @brief Returns the current clock, in seconds */
-double SD_get_clock(void) {
+double SD_get_clock() {
return surf_get_clock();
}
*
* \see SD_init(), SD_task_destroy()
*/
-void SD_exit(void)
+void SD_exit()
{
TRACE_surf_resource_utilization_release();
{
/* we must destroy the dependencies carefuly (with SD_dependency_remove) because each one is stored twice */
SD_dependency_t dependency;
- while (!xbt_dynar_is_empty(task->tasks_before)) {
+ while (xbt_dynar_is_empty(task->tasks_before) == 0) {
xbt_dynar_get_cpy(task->tasks_before, 0, &dependency);
SD_task_dependency_remove(dependency->src, dependency->dst);
}
- while (!xbt_dynar_is_empty(task->tasks_after)) {
+ while (xbt_dynar_is_empty(task->tasks_after) == 0) {
xbt_dynar_get_cpy(task->tasks_after, 0, &dependency);
SD_task_dependency_remove(dependency->src, dependency->dst);
}
xbt_free(task->flops_amount);
xbt_free(task->bytes_amount);
- task->flops_amount = task->bytes_amount = NULL;
+ task->flops_amount = NULL;
+ task->bytes_amount = NULL;
}
-void* SD_task_new_f(void)
+void* SD_task_new_f()
{
SD_task_t task = xbt_new0(s_SD_task_t, 1);
task->tasks_before = xbt_dynar_new(sizeof(SD_dependency_t), NULL);
void SD_task_recycle_f(void *t)
{
- SD_task_t task = (SD_task_t) t;
+ SD_task_t task = static_cast<SD_task_t>(t);
/* Reset the content */
task->kind = SD_TASK_NOT_TYPED;
void SD_task_free_f(void *t)
{
- SD_task_t task = (SD_task_t)t;
+ SD_task_t task = static_cast<SD_task_t>(t);
xbt_dynar_free(&task->tasks_before);
xbt_dynar_free(&task->tasks_after);
*/
SD_task_t SD_task_create(const char *name, void *data, double amount)
{
- SD_task_t task = (SD_task_t)xbt_mallocator_get(sd_global->task_mallocator);
+ SD_task_t task = static_cast<SD_task_t>(xbt_mallocator_get(sd_global->task_mallocator));
/* general information */
task->data = data; /* user data */
return task->alpha;
}
-
/**
* \brief Returns the remaining amount work to do till the completion of a task
*
if (task->kind == SD_TASK_COMP_PAR_AMDAHL)
XBT_INFO(" - alpha: %.2f", task->alpha);
XBT_INFO(" - Dependencies to satisfy: %d", task->unsatisfied_dependencies);
- if (!xbt_dynar_is_empty(task->tasks_before)) {
+ if (xbt_dynar_is_empty(task->tasks_before) == 0) {
XBT_INFO(" - pre-dependencies:");
xbt_dynar_foreach(task->tasks_before, counter, dependency) {
XBT_INFO(" %s", SD_task_get_name(dependency->src));
}
}
- if (!xbt_dynar_is_empty(task->tasks_after)) {
+ if (xbt_dynar_is_empty(task->tasks_after)== 0) {
XBT_INFO(" - post-dependencies:");
xbt_dynar_foreach(task->tasks_after, counter, dependency) {
XBT_INFO(" %s", SD_task_get_name(dependency->dst));
{
unsigned int counter;
SD_dependency_t dependency;
- FILE *fout = (FILE*)out;
+ FILE *fout = static_cast<FILE*>(out);
fprintf(fout, " T%p [label=\"%.20s\"", task, task->name);
switch (task->kind) {
case SD_TASK_COMM_E2E:
*/
void SD_task_dependency_add(const char *name, void *data, SD_task_t src, SD_task_t dst)
{
- int found = 0;
+ bool found = false;
SD_dependency_t dependency;
unsigned long length = xbt_dynar_length(src->tasks_after);
void SD_task_dependency_remove(SD_task_t src, SD_task_t dst)
{
unsigned long length;
- int found = 0;
+ bool found = false;
SD_dependency_t dependency;
/* remove the dependency from src->tasks_after */
xbt_dynar_get_cpy(src->tasks_after, i, &dependency);
if (dependency->dst == dst) {
xbt_dynar_remove_at(src->tasks_after, i, NULL);
- found = 1;
+ found = true;
}
}
if (!found)
/* remove the dependency from dst->tasks_before */
length = xbt_dynar_length(dst->tasks_before);
- found = 0;
+ found = false;
for (unsigned long i = 0; i < length && !found; i++) {
xbt_dynar_get_cpy(dst->tasks_before, i, &dependency);
__SD_task_dependency_destroy(dependency);
dst->unsatisfied_dependencies--;
dst->is_not_ready--;
- found = 1;
+ found = true;
}
}
/* should never happen... */
*/
void *SD_task_dependency_get_data(SD_task_t src, SD_task_t dst)
{
- int found = 0;
+ bool found = false;
SD_dependency_t dependency;
unsigned long length = xbt_dynar_length(src->tasks_after);
task->rate = rate;
if (flops_amount) {
- task->flops_amount = (double*)xbt_realloc(task->flops_amount, sizeof(double) * host_count);
+ task->flops_amount = static_cast<double*>(xbt_realloc(task->flops_amount, sizeof(double) * host_count));
memcpy(task->flops_amount, flops_amount, sizeof(double) * host_count);
} else {
xbt_free(task->flops_amount);
int communication_nb = host_count * host_count;
if (bytes_amount) {
- task->bytes_amount = (double*)xbt_realloc(task->bytes_amount, sizeof(double) * communication_nb);
+ task->bytes_amount = static_cast<double*>(xbt_realloc(task->bytes_amount, sizeof(double) * communication_nb));
memcpy(task->bytes_amount, bytes_amount, sizeof(double) * communication_nb);
} else {
xbt_free(task->bytes_amount);
task->bytes_amount = NULL;
}
- task->host_list = (sg_host_t*) xbt_realloc(task->host_list, sizeof(sg_host_t) * host_count);
+ task->host_list = static_cast<sg_host_t*>(xbt_realloc(task->host_list, sizeof(sg_host_t) * host_count));
memcpy(task->host_list, workstation_list, sizeof(sg_host_t) * host_count);
SD_task_do_schedule(task);
switch (task->kind) {
case SD_TASK_COMP_PAR_AMDAHL:
SD_task_distribute_comp_amdahl(task, count);
+ /* no break */
case SD_TASK_COMM_E2E:
case SD_TASK_COMP_SEQ:
xbt_assert(task->host_count == count, "Got %d locations, but were expecting %d locations", count,task->host_count);
typedef struct SD_global {
xbt_mallocator_t task_mallocator; /* to not remalloc new tasks */
- int watch_point_reached; /* has a task just reached a watch point? */
+ bool watch_point_reached; /* has a task just reached a watch point? */
xbt_dynar_t initial_task_set;
xbt_dynar_t executable_task_set;
XBT_PRIVATE void smpi_global_init(void);
XBT_PRIVATE void smpi_global_destroy(void);
XBT_PRIVATE double smpi_mpi_wtime(void);
+XBT_PRIVATE void smpi_mpi_init(void);
XBT_PRIVATE bool is_datatype_valid(MPI_Datatype datatype);
static simgrid::config::Flag<double> smpi_wtime_sleep(
"smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
+static simgrid::config::Flag<double> smpi_init_sleep(
+ "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
static simgrid::config::Flag<double> smpi_iprobe_sleep(
"smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
return current;
}
+void smpi_mpi_init() {
+ if(smpi_init_sleep > 0)
+ simcall_process_sleep(smpi_init_sleep);
+}
+
double smpi_mpi_wtime(){
double time;
if (smpi_process_initialized() != 0 &&
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
- MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
+ MPI_Request request = build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);
// to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
print_request("New iprobe", request);
// We have to test both mailboxes as we don't know if we will receive one one or another
- if (xbt_cfg_get_int("smpi/async-small-thresh")>0){
+ if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
mailbox = smpi_process_mailbox_small();
- XBT_DEBUG("trying to probe the perm recv mailbox");
+ XBT_DEBUG("Trying to probe the perm recv mailbox");
request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
}
- if (request->action==NULL){
- mailbox = smpi_process_mailbox();
- XBT_DEBUG("trying to probe the other mailbox");
- request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+
+ if (request->action == NULL){
+ mailbox = smpi_process_mailbox();
+ XBT_DEBUG("trying to probe the other mailbox");
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
}
if (request->action){
simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
- MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+ MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
*flag = 1;
- if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED)==0) {
+ if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
status->MPI_TAG = req->tag;
status->MPI_ERROR = MPI_SUCCESS;
status->count = req->real_size;
}
- nsleeps=1;//reset the number of sleeps we will do next time
+ nsleeps = 1;//reset the number of sleeps we will do next time
}
else {
*flag = 0;
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
smpi_bench_begin();
}
+
+ smpi_mpi_init();
+
return MPI_SUCCESS;
}
examples/platforms/torus_cluster.xml
examples/platforms/two_clusters.xml
examples/platforms/two_hosts.xml
- examples/platforms/two_hosts_platform.xml
examples/platforms/two_hosts_platform_shared.xml
examples/platforms/two_hosts_platform_with_availability.xml
examples/platforms/two_hosts_platform_with_availability_included.xml