-/* Copyright (c) 2012. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2012-2013. The SimGrid Team.
+ * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "msg_private.h"
#include "xbt/sysdep.h"
#include "xbt/log.h"
+#include "simgrid/platf.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_vm, msg,
"Cloud-oriented parts of the MSG API");
/** \ingroup m_host_management
* \brief Change the value of a given host property
*
- * \param host a host
+ * \param vm a vm
* \param name a property name
* \param value what to change the property to
* \param free_ctn the freeing function to use to kill the value on need
const char *src_pm_name = argv[2];
const char *dst_pm_name = argv[3];
msg_vm_t vm = MSG_get_host_by_name(vm_name);
- msg_vm_t dst_pm = MSG_get_host_by_name(dst_pm_name);
+ msg_host_t src_pm = MSG_get_host_by_name(src_pm_name);
+ msg_host_t dst_pm = MSG_get_host_by_name(dst_pm_name);
s_ws_params_t params;
}
+ /* uninstall the current affinity setting */
+ simcall_vm_set_affinity(vm, src_pm, 0);
+
simcall_vm_migrate(vm, dst_pm);
simcall_vm_resume(vm);
+ /* install the affinity setting of the VM on the destination pm */
+ {
+ msg_host_priv_t priv = msg_host_resource_priv(vm);
+
+ unsigned long affinity_mask = (unsigned long) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *) dst_pm, sizeof(msg_host_t));
+ simcall_vm_set_affinity(vm, dst_pm, affinity_mask);
+ XBT_INFO("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(dst_pm), MSG_host_get_name(vm));
+ }
+
{
char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, 4);
dirty_page_t dp = NULL;
xbt_dict_foreach(priv->dp_objs, cursor, key, dp) {
double remaining = MSG_task_get_remaining_computation(dp->task);
- double clock = MSG_get_clock();
+
+ double clock = MSG_get_clock();
// total += calc_updated_pages(key, vm, dp, remaining, clock);
total += get_computed(key, vm, dp, remaining, clock);
static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name,
- double size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
+ sg_size_t size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
{
char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
msg_task_t task = MSG_task_create(task_name, 0, size, NULL);
if (stage == 2){
- XBT_DEBUG("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
+ XBT_DEBUG("mig-stage%d.%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
else{
- XBT_DEBUG("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
+ XBT_DEBUG("mig-stage%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
}
xbt_free(task_name);
}
static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *dst_pm_name,
- long ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
+ sg_size_t ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
{
const char *vm_name = MSG_host_get_name(vm);
char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
- // const long chunksize = 1024 * 1024 * 100;
- const long chunksize = 1024L * 1024 * 100000;
- long remaining = ramsize;
+ // const sg_size_t chunksize = (sg_size_t)1024 * 1024 * 100;
+ const sg_size_t chunksize = (sg_size_t)1024 * 1024 * 100000;
+ sg_size_t remaining = ramsize;
double computed_total = 0;
while (remaining > 0) {
- long datasize = chunksize;
+ sg_size_t datasize = chunksize;
if (remaining < chunksize)
datasize = remaining;
remaining -= datasize;
send_migration_data(vm_name, src_pm_name, dst_pm_name, datasize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
-
double computed = lookup_computed_flop_counts(vm, 1, 0);
computed_total += computed;
// launch_deferred_exec_process(vm, overhead, 10000);
// }
}
-
+ xbt_free(mbox);
return computed_total;
}
s_ws_params_t params;
simcall_host_get_params(vm, ¶ms);
- const long ramsize = params.ramsize;
+ const sg_size_t ramsize = params.ramsize;
const long devsize = params.devsize;
const int skip_stage1 = params.skip_stage1;
const int skip_stage2 = params.skip_stage2;
double clock_post_send = MSG_get_clock();
double bandwidth = ramsize / (clock_post_send - clock_prev_send);
threshold = get_threshold_value(bandwidth, max_downtime);
- XBT_INFO("actual banwdidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
+ XBT_INFO("actual bandwidth %f (MB/s), threshold %f", bandwidth / 1024 / 1024, threshold);
}
{
msg_task_t task = NULL;
msg_error_t ret = MSG_task_recv(&task, mbox_ctl);
+
xbt_assert(ret == MSG_OK);
char *expected_task_name = get_mig_task_name(sg_host_name(vm), sg_host_name(src_pm), sg_host_name(dst_pm), 4);
xbt_assert(strcmp(task->name, expected_task_name) == 0);
xbt_free(expected_task_name);
+ MSG_task_destroy(task);
}
xbt_free(mbox_ctl);
* For example,
* On PM0, there are Task1 and VM0.
* On VM0, there is Task2.
- * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0.
+ * Now we bound 75% to Task1\@PM0 and bound 25% to Task2\@VM0.
* Then,
- * Task1@PM0 gets 50%.
- * Task2@VM0 gets 25%.
- * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively.
+ * Task1\@PM0 gets 50%.
+ * Task2\@VM0 gets 25%.
+ * This is NOT 75% for Task1\@PM0 and 25% for Task2\@VM0, respectively.
*
* This is because a VM has the dummy CPU action in the PM layer. Putting a
* task on the VM does not affect the bound of the dummy CPU action. The bound
*
*
* 2.
- * Note that bound == 0 means no bound (i.e., unlimited).
+ * Note that bound == 0 means no bound (i.e., unlimited). But, if a host has
+ * multiple CPU cores, the CPU share of a computation task (or a VM) never
+ * exceeds the capacity of a CPU core.
*/
void MSG_vm_set_bound(msg_vm_t vm, double bound)
{
return simcall_vm_set_bound(vm, bound);
}
+
+
+/** @brief Set the CPU affinity of a given VM.
+ * @ingroup msg_VMs
+ *
+ * This function changes the CPU affinity of a given VM. Usage is the same as
+ * MSG_task_set_affinity(). See the MSG_task_set_affinity() for details.
+ */
+void MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask)
+{
+ msg_host_priv_t priv = msg_host_resource_priv(vm);
+
+ if (mask == 0)
+ xbt_dict_remove_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm));
+ else
+ xbt_dict_set_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm), (void *) mask, NULL);
+
+ msg_host_t pm_now = MSG_vm_get_pm(vm);
+ if (pm_now == pm) {
+ XBT_INFO("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
+ simcall_vm_set_affinity(vm, pm, mask);
+ } else
+ XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
+}