ChangeLog for SimGrid-java, before its integration into the main source tree.
+SimGrid (3.13) UNRELEASED; urgency=low
+
+ Backwards Compatibility breaks
+ - VM.setBound(int load) is now VM.setBound(double bound) to meet the MSG semantics. Use VM.getSpeed()*load/100 for the legacy behavior.
+
+
SimGrid-java (3.9) stable; urgency=low
-- 2013-01-30 Da SimGrid team <simgrid-devel@lists.gforge.inria.fr>
public void setLoad(int load){
if (load >0) {
- this.setBound(load);
+ this.setBound(this.getSpeed()*load/100);
// this.getDaemon().setLoad(load);
daemon.resume();
} else{
double task1_remain_prev = MSG_task_get_flops_amount(task1);
{
- const double cpu_speed = MSG_get_host_speed(pm0);
+ const double cpu_speed = MSG_host_get_speed(pm0);
int i = 0;
for (i = 0; i < 10; i++) {
double new_bound = (cpu_speed / 10) * i;
static void test_one_task(msg_host_t hostA)
{
- const double cpu_speed = MSG_get_host_speed(hostA);
+ const double cpu_speed = MSG_host_get_speed(hostA);
const double computation_amount = cpu_speed * 10;
const char *hostA_name = MSG_host_get_name(hostA);
static void test_two_tasks(msg_host_t hostA, msg_host_t hostB)
{
- const double cpu_speed = MSG_get_host_speed(hostA);
- xbt_assert(cpu_speed == MSG_get_host_speed(hostB));
+ const double cpu_speed = MSG_host_get_speed(hostA);
+ xbt_assert(cpu_speed == MSG_host_get_speed(hostB));
const double computation_amount = cpu_speed * 10;
const char *hostA_name = MSG_host_get_name(hostA);
const char *hostB_name = MSG_host_get_name(hostB);
{
msg_host_t vm0 = MSG_vm_create_core(pm0, "VM0");
- const double cpu_speed = MSG_get_host_speed(pm0);
+ const double cpu_speed = MSG_host_get_speed(pm0);
MSG_vm_set_bound(vm0, cpu_speed / 10);
MSG_vm_start(vm0);
MSG_host_set_params(vm0, &params);
MSG_vm_start(vm0);
- const double cpu_speed = MSG_get_host_speed(pm0);
+ const double cpu_speed = MSG_host_get_speed(pm0);
MSG_vm_start(vm0);
XBT_INFO("# 10. Test migration");
msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0),
- MSG_get_host_speed(pm0));
+ MSG_host_get_speed(pm0));
XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1),
- MSG_get_host_speed(pm1));
+ MSG_host_get_speed(pm1));
XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2),
- MSG_get_host_speed(pm2));
+ MSG_host_get_speed(pm2));
MSG_process_create("master", master_main, NULL, pm0);
hosts = MSG_hosts_as_dynar();
xbt_dynar_foreach(hosts, i, host){
- XBT_INFO("Host '%s' runs at %.0f flops/s",MSG_host_get_name(host), MSG_get_host_speed(host));
+ XBT_INFO("Host '%s' runs at %.0f flops/s",MSG_host_get_name(host), MSG_host_get_speed(host));
}
MSG_launch_application(argv[2]);
XBT_PUBLIC(void) MSG_host_on(msg_host_t host);
XBT_PUBLIC(void) MSG_host_off(msg_host_t host);
XBT_PUBLIC(msg_host_t) MSG_host_self(void);
-XBT_PUBLIC(double) MSG_get_host_speed(msg_host_t h);
+XBT_PUBLIC(double) MSG_host_get_speed(msg_host_t h);
XBT_PUBLIC(int) MSG_host_get_core_number(msg_host_t h);
XBT_PUBLIC(xbt_swag_t) MSG_host_get_process_list(msg_host_t h);
XBT_PUBLIC(int) MSG_host_is_on(msg_host_t h);
XBT_PUBLIC(int) MSG_host_is_off(msg_host_t h);
+// deprecated
+XBT_PUBLIC(double) MSG_get_host_speed(msg_host_t h);
+
+
XBT_PUBLIC(double) MSG_host_get_power_peak_at(msg_host_t h, int pstate);
XBT_PUBLIC(double) MSG_host_get_current_power_peak(msg_host_t h);
XBT_PUBLIC(int) MSG_host_get_nb_pstates(msg_host_t h);
return -1;
}
- return (jdouble) MSG_get_host_speed(host);
+ return (jdouble) MSG_host_get_speed(host);
}
JNIEXPORT jdouble JNICALL
}
JNIEXPORT void JNICALL
-Java_org_simgrid_msg_VM_setBound(JNIEnv *env, jobject jvm, jint load) {
+Java_org_simgrid_msg_VM_setBound(JNIEnv *env, jobject jvm, jdouble bound) {
msg_vm_t vm = jvm_get_native(env,jvm);
- double bound = MSG_get_host_speed(vm) * load / 100;
- MSG_vm_set_bound(vm, bound);
+ MSG_vm_set_bound(vm, bound);
}
JNIEXPORT void JNICALL
msg_vm_t vm = jvm_get_native(env,jvm);
MSG_vm_restore(vm);
}
-
-
-
-JNIEXPORT jobject JNICALL
-Java_org_simgrid_msg_VM_get_pm(JNIEnv *env, jobject jvm) {
- jobject jhost;
- msg_vm_t vm = jvm_get_native(env,jvm);
- msg_host_t host = MSG_vm_get_pm(vm);
-
- if (!host->extension(JAVA_HOST_LEVEL)) {
- THROW_DEADCODE;
- /* the native host not yet associated with the java host instance */
-
- /* instanciate a new java host instance */
- jhost = jhost_new_instance(env);
-
- if (!jhost) {
- jxbt_throw_jni(env, "java host instantiation failed");
- return NULL;
- }
-
- /* get a global reference to the newly created host */
- jhost = jhost_ref(env, jhost);
-
- if (!jhost) {
- jxbt_throw_jni(env, "global ref allocation failed");
- return NULL;
- }
- /* Sets the host name */
- const char *name = MSG_host_get_name(host);
- jobject jname = env->NewStringUTF(name);
- env->SetObjectField(jhost, jxbt_get_jfield(env,
- env->FindClass("org/simgrid/msg/Host"), "name", "Ljava/lang/String;"),
- jname);
- /* Bind & store it */
- jhost_bind(jhost, host, env);
- host->extension_set(JAVA_HOST_LEVEL, (void *) jhost);
- } else {
- jhost = (jobject) host->extension(JAVA_HOST_LEVEL);
- }
-
- return jhost;
-}
/**
* Class org_simgrid_msg_VM
* Method setBound
- * Signature ()B
+ * Signature (D)V
*/
JNIEXPORT void JNICALL
-Java_org_simgrid_msg_VM_setBound(JNIEnv *env, jobject jvm, jint load);
+Java_org_simgrid_msg_VM_setBound(JNIEnv *env, jobject jvm, jdouble bound);
/**
* Class org_simgrid_msg_VM
Java_org_simgrid_msg_VM_save(JNIEnv *env, jobject jvm);
/**
* Class org_simgrid_msg_VM
- * Method save
+ * Method restore
* Signature ()V
*/
JNIEXPORT void JNICALL
Java_org_simgrid_msg_VM_restore(JNIEnv *env, jobject jvm);
-JNIEXPORT jobject JNICALL
-Java_org_simgrid_msg_VM_get_pm(JNIEnv *env, jobject jvm);
-
SG_END_DECL()
#endif
/**
- * Bound the VM to a certain % of its vcpu capability (e.g. 75% of vm.getSpeed())
- * @param load percentage (between [0,100]
+ * Set a CPU bound for a given VM.
+ * @param bound in flops/s
*/
- public native void setBound(int load);
+ public native void setBound(double bound);
/**
* start the VM
/** \ingroup m_host_management
* \brief Return the speed of the processor (in flop/s), regardless of the current load on the machine.
*/
-double MSG_get_host_speed(msg_host_t host) {
+double MSG_host_get_speed(msg_host_t host) {
return host->speed();
}
+/** \ingroup m_host_management
+ * \brief Return the speed of the processor (in flop/s), regardless of the current load on the machine.
+ * Deprecated: use MSG_host_get_speed
+ */
+double MSG_get_host_speed(msg_host_t host) {
+ XBT_WARN("MSG_get_host_speed is deprecated: use MSG_host_get_speed");
+ return MSG_host_get_speed(host);
+}
+
+
/** \ingroup m_host_management
* \brief Return the number of cores.
*
{
/* For the moment, intensity_rate is the percentage against the migration
* bandwidth */
- double host_speed = MSG_get_host_speed(pm);
+ double host_speed = MSG_host_get_speed(pm);
double update_speed = ((double)dp_intensity/100) * mig_netspeed;
msg_vm_t vm = MSG_vm_create_core(pm, name);
bool BoostContext::parallel_ = false;
xbt_parmap_t BoostContext::parmap_ = nullptr;
-uintptr_t BoostContext::threads_working_ = 0;
+uintptr_t BoostContext::threads_working_ = 0;
xbt_os_thread_key_t BoostContext::worker_id_key_;
unsigned long BoostContext::process_index_ = 0;
BoostContext* BoostContext::maestro_context_ = nullptr;
static bool parallel_;
static xbt_parmap_t parmap_;
static std::vector<BoostContext*> workers_context_;
- static unsigned long threads_working_;
+ static uintptr_t threads_working_;
static xbt_os_thread_key_t worker_id_key_;
static unsigned long process_index_;
static BoostContext* maestro_context_;
xbt_os_thread_set_specific(raw_worker_id_key, (void*) worker_id);
RawContext* worker_context = (RawContext*) SIMIX_context_self();
raw_workers_context[worker_id] = worker_context;
- XBT_DEBUG("Saving worker stack %lu", worker_id);
+ XBT_DEBUG("Saving worker stack %zu", worker_id);
SIMIX_context_set_current(this);
raw_swapcontext(&worker_context->stack_top_, this->stack_top_);
#else