// TODO add VDI later
XBT_PUBLIC(msg_vm_t) MSG_vm_create_core(msg_host_t location, const char *name);
/* VM creation with resource caps. Sizes are in MBytes; dp_intensity is the
 * dirty-page intensity as a percentage of mig_netspeed in [0-100]. */
XBT_PUBLIC(msg_vm_t) MSG_vm_create(msg_host_t ind_pm, const char *name,
    int core_nb, int mem_cap, int net_cap, char *disk_path, int disk_size, int mig_netspeed, int dp_intensity);
XBT_PUBLIC(void) MSG_vm_destroy(msg_vm_t vm);
}
/* JNI binding: create the native VM backing a Java org.simgrid.msg.VM object
 * and bind it to the jobject. All size parameters are jint MBytes. */
JNIEXPORT void JNICALL
Java_org_simgrid_msg_VM_create(JNIEnv *env, jobject jvm, jobject jhost, jstring jname,
                               jint jncore, jint jramsize, jint jnetcap, jstring jdiskpath,
                               jint jdisksize, jint jmig_netspeed, jint jdp_intensity) {
  msg_host_t host = jhost_get_native(env, jhost);
  // disk_path = (*env)->GetStringUTFChars(env, jdiskpath, 0);
  // disk_path = xbt_strdup(disk_path);
  // NOTE(review): `name` is used below but never extracted from jname in this
  // snippet (the GetStringUTFChars lines above are commented out) — confirm it
  // is obtained elsewhere before this call.
  msg_vm_t vm = MSG_vm_create(host, name, (int) jncore, (int) jramsize,
                              (int) jnetcap, NULL, (int) jdisksize,
                              (int) jmig_netspeed, (int) jdp_intensity);
  jvm_bind(env, jvm, vm);
}
*/
/* Prototype for the JNI VM-creation binding. Parameter names aligned with the
 * definition: migration bandwidth (jmig_netspeed) comes before dirty-page
 * intensity (jdp_intensity). */
JNIEXPORT void JNICALL
Java_org_simgrid_msg_VM_create(JNIEnv *env, jobject jvm, jobject jhost, jstring jname,
                               jint jncore, jint jramsize, jint jnetcap, jstring jdiskpath,
                               jint jdisksize, jint jmig_netspeed, jint jdp_intensity);
/**
 * Class org_simgrid_msg_VM
 * @param netCap (not used for the moment)
 * @param diskPath (not used for the moment)
 * @param diskSize (not used for the moment)
 * @param migNetSpeed (network bandwidth allocated for migrations in MB/s, if you don't know put zero ;))
 * @param dpIntensity (dirty page intensity, a percentage of migNetSpeed [0-100], if you don't know put zero ;))
 */
public VM(Host host, String name, int nCore, int ramSize,
		int netCap, String diskPath, int diskSize, int migNetSpeed, int dpIntensity){
	super();
	super.name = name;
	this.currentHost = host;
	create(host, name, nCore, ramSize, netCap, diskPath, diskSize, migNetSpeed, dpIntensity);
	VM.addVM(this);
}
* @param netCap (not used for the moment)
* @param diskPath (not used for the moment)
* @param diskSize (not used for the moment)
- * @param dpRate (dirty page rate in MB/flop, if you don't know put zero ;))
* @param migNetSpeed (network bandwith allocated for migrations in MB/s, if you don't know put zero ;))
+ * @param dpIntensity (dirty page intensity, a percentage of migNetSpeed [0-100], if you don't know put zero ;))
*/
- private native void create(Host host, String name, int nCore, long ramSize,
- long netCap, String diskPath, long diskSize, long dpRate, long migNetSpeed);
+ private native void create(Host host, String name, int nCore, int ramSize,
+ int netCap, String diskPath, int diskSize, int migNetSpeed, int dpIntensity);
/**
* start the VM
* All parameters are in MBytes
*
*/
-msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, long ramsize,
- long net_cap, char *disk_path, long disksize,
- long dp_rate, long mig_netspeed)
+msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, int ramsize,
+ int net_cap, char *disk_path, int disksize,
+ int mig_netspeed, int dp_intensity)
{
+ /* For the moment, intensity_rate is the percentage against the migration bandwidth */
+ double host_speed = MSG_get_host_speed(ind_pm);
+ double update_speed = ((double)dp_intensity/100) * mig_netspeed;
+
msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
s_ws_params_t params;
memset(¶ms, 0, sizeof(params));
params.devsize = 0;
params.skip_stage2 = 0;
params.max_downtime = 0.03;
- params.dp_rate = 1L * 1024 * 1024 * dp_rate;
+ params.dp_rate = (update_speed * 1L * 1024 * 1024 ) / host_speed;
params.dp_cap = params.ramsize / 0.9; // working set memory is 90%
params.mig_speed = 1L * 1024 * 1024 * mig_netspeed; // mig_speed
+ //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
simcall_host_set_params(vm, ¶ms);
return vm;
*/
/** @brief Apply the given bound to the VM via the corresponding simcall. */
void MSG_vm_set_bound(msg_vm_t vm, double bound)
{
  /* `return <expr>;` in a void function is a C constraint violation
   * (C99 6.8.6.4); a plain call is the correct form. */
  simcall_vm_set_bound(vm, bound);
}
surf_min_index = 0;
/* sequential version */
- struct timeval bla;
- gettimeofday(&bla, NULL);
- //unsigned int t = end.tv_usec - begin.tv_usec;
- XBT_INFO("Surf_solve : bla.before iteration:%lu/%lu",bla.tv_sec, bla.tv_usec);
xbt_dynar_foreach(model_list_invoke, iter, model) {
- gettimeofday(&bla, NULL);
- XBT_INFO("Surf_solve : iteration:%lu/%lu",bla.tv_sec, bla.tv_usec);
surf_share_resources(model);
}
- gettimeofday(&bla, NULL);
- XBT_INFO("Surf_solve : end before iteration:%lu/%lu",bla.tv_sec, bla.tv_usec);
unsigned i;
for (i = 0; i < xbt_dynar_length(model_list_invoke); i++) {
if ((min < 0.0 || surf_mins[i] < min)