2 /* Copyright (c) 2007 Arnaud Legrand, Pedro Velho. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 * Modelling the proportional fairness using the Lagrange Optimization
7 * Approach. For a detailed description see:
8 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
11 #include "xbt/sysdep.h"
12 #include "xbt/mallocator.h"
13 #include "maxmin_private.h"
/* Log channels: a default channel for the Lagrangian solver as a whole,
 * plus a dedicated sub-channel for the dichotomy (bisection) inner loop. */
21 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf,
22 "Logging specific to SURF (lagrange)");
23 XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf,
24 "Logging specific to SURF (lagrange dichotomy)");
27 * Local prototypes to implement the lagrangian optimization with optimal step, also called dichotomy.
29 //solves the proportional fairness using a Lagrange optimization with dichotomy step
30 void lagrange_solve(lmm_system_t sys);
31 //computes the value of the dichotomy using an initial value, init, with a specific variable or constraint
32 double dichotomy(double init, double diff(double, void *), void *var_cnst,
/* NOTE(review): the continuation of this prototype (its final parameter,
 * presumably `double min_error` judging from the definition below) sits on
 * a truncated line of this listing. */
34 //computes the value of the differential of variable param_var applied to mu
35 double partial_diff_mu(double mu, void *param_var);
36 //computes the value of the differential of constraint param_cnst applied to lambda
37 double partial_diff_lambda(double lambda, void *param_cnst);
38 //auxiliary function to compute the partial_diff
39 double diff_aux(lmm_variable_t var, double x);
42 static int __check_kkt(xbt_swag_t cnst_list, xbt_swag_t var_list, int warn)
/* Checks the Karush-Kuhn-Tucker conditions on the current primal/dual
 * values: no constraint (link) may be over-used, and no bounded variable
 * (flow) may exceed its bound.  `warn` presumably selects warning output
 * in the commented-out checks below.
 * NOTE(review): this listing is truncated (the embedded line numbers
 * jump), so several statements -- including the computation of the
 * saturation accumulator `tmp` and the final return -- are not visible
 * here; confirm against the complete file. */
44 xbt_swag_t elem_list = NULL;
45 lmm_element_t elem = NULL;
46 lmm_constraint_t cnst = NULL;
47 lmm_variable_t var = NULL;
51 //verify the KKT property for each link
52 xbt_swag_foreach(cnst, cnst_list) {
54 elem_list = &(cnst->element_set);
55 xbt_swag_foreach(elem, elem_list) {
/* `tmp` accumulates the constraint's usage; its declaration and the
 * accumulation statement are on truncated lines. */
62 if (double_positive(tmp - cnst->bound)) {
65 ("The link (%p) is over-used. Expected less than %f and got %f",
66 cnst, cnst->bound, tmp);
69 DEBUG3("Checking KKT for constraint (%p): sat = %f, lambda = %f ",
70 cnst, tmp - cnst->bound, cnst->lambda);
72 /* if(!((fabs(tmp - cnst->bound)<MAXMIN_PRECISION && cnst->lambda>=MAXMIN_PRECISION) || */
73 /* (fabs(tmp - cnst->bound)>=MAXMIN_PRECISION && cnst->lambda<MAXMIN_PRECISION))) { */
74 /* if(warn) WARN1("The KKT condition is not verified for cnst %p...", cnst); */
79 //verify the KKT property of each flow
80 xbt_swag_foreach(var, var_list) {
/* Boundless (bound < 0) or inactive (weight <= 0) variables are skipped. */
81 if (var->bound < 0 || var->weight <= 0)
83 DEBUG3("Checking KKT for variable (%p): sat = %f mu = %f", var,
84 var->value - var->bound, var->mu);
86 if (double_positive(var->value - var->bound)) {
89 ("The variable (%p) is too large. Expected less than %f and got %f",
90 var, var->bound, var->value);
94 /* if(!((fabs(var->value - var->bound)<MAXMIN_PRECISION && var->mu>=MAXMIN_PRECISION) || */
95 /* (fabs(var->value - var->bound)>=MAXMIN_PRECISION && var->mu<MAXMIN_PRECISION))) { */
96 /* if(warn) WARN1("The KKT condition is not verified for var %p...",var); */
103 void lagrange_solve(lmm_system_t sys)
/* Entry point of the Lagrangian solver: iteratively updates the dual
 * variables (mu per bounded variable, lambda per constraint) via the
 * dichotomy (bisection) routine and recomputes the primal values until
 * overall_error drops below epsilon_min_error or max_iterations is hit.
 * NOTE(review): this listing is truncated (embedded line numbers jump);
 * the declarations of `i`, `iteration`, `dual_updated` and `tmp`, loop
 * increments, early exits and several assignments are not visible here.
 * Confirm any behavioral claim against the complete file. */
106 * Lagrange Variables.
108 int max_iterations = 100;
109 double epsilon_min_error = 1e-6;
110 double dichotomy_min_error = 1e-20;
111 double overall_error = 1;
114 * Variables to manipulate the data structure proposed to model the maxmin
115 * fairness. See documentation for more details.
117 xbt_swag_t cnst_list = NULL;
118 lmm_constraint_t cnst = NULL;
120 xbt_swag_t var_list = NULL;
121 lmm_variable_t var = NULL;
124 * Auxiliary variables.
131 DEBUG0("Iterative method configuration snapshot =====>");
132 DEBUG1("#### Maximum number of iterations : %d", max_iterations);
133 DEBUG1("#### Minimum error tolerated : %e",
135 DEBUG1("#### Minimum error tolerated (dichotomy) : %e",
136 dichotomy_min_error);
/* Nothing to do when the system was not modified since the last solve. */
138 if (!(sys->modified))
142 * Initialize the var list variable with only the active variables.
143 * Associate an index in the swag variables. Initialize mu.
145 var_list = &(sys->variable_set);
147 xbt_swag_foreach(var, var_list) {
148 if ((var->bound < 0.0) || (var->weight <= 0.0)) {
149 DEBUG1("#### NOTE var(%d) is a boundless (or inactive) variable", i);
155 DEBUG3("#### var(%d) %p ->mu : %e", i, var, var->mu);
156 DEBUG3("#### var(%d) %p ->weight: %e", i, var, var->weight);
157 DEBUG3("#### var(%d) %p ->bound: %e", i, var, var->bound);
/* Initialize lambda / new_lambda for every active constraint. */
164 cnst_list = &(sys->active_constraint_set);
165 xbt_swag_foreach(cnst, cnst_list) {
167 cnst->new_lambda = 2.0;
168 DEBUG2("#### cnst(%p)->lambda : %e", cnst, cnst->lambda);
172 * Iterate until reaching a minimum error or the maximum number of iterations.
174 while (overall_error > epsilon_min_error && iteration < max_iterations) {
178 DEBUG1("************** ITERATION %d **************", iteration);
179 DEBUG0("-------------- Gradient Descent ----------");
181 * Compute the value of mu_i
183 //forall mu_i in mu_1, mu_2, ..., mu_n
184 xbt_swag_foreach(var, var_list) {
185 if ((var->bound >= 0) && (var->weight > 0)) {
186 DEBUG1("Working on var (%p)", var);
/* NOTE(review): the assignment target of this dichotomy call
 * (presumably var->new_mu, given its use just below) is on a
 * truncated line. */
188 dichotomy(var->mu, partial_diff_mu, var, dichotomy_min_error);
/* Count a dual update only when the change exceeds the dichotomy
 * tolerance. */
189 dual_updated += (fabs(var->new_mu-var->mu)>dichotomy_min_error);
190 DEBUG2("dual_updated (%d) : %1.20f",dual_updated,fabs(var->new_mu-var->mu));
191 DEBUG3("Updating mu : var->mu (%p) : %1.20f -> %1.20f", var, var->mu, var->new_mu);
192 var->mu = var->new_mu;
197 * Compute the value of lambda_i
199 //forall lambda_i in lambda_1, lambda_2, ..., lambda_n
200 xbt_swag_foreach(cnst, cnst_list) {
201 DEBUG1("Working on cnst (%p)", cnst);
/* NOTE(review): as above, the target (presumably cnst->new_lambda)
 * is on a truncated line. */
203 dichotomy(cnst->lambda, partial_diff_lambda, cnst,
204 dichotomy_min_error);
205 dual_updated += (fabs(cnst->new_lambda-cnst->lambda)>dichotomy_min_error);
206 DEBUG2("dual_updated (%d) : %1.20f",dual_updated,fabs(cnst->new_lambda-cnst->lambda));
207 DEBUG3("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", cnst, cnst->lambda, cnst->new_lambda);
208 cnst->lambda = cnst->new_lambda;
212 * Now computes the values of each variable (\rho) based on
213 * the values of \lambda and \mu.
215 DEBUG0("-------------- Check convergence ----------");
217 xbt_swag_foreach(var, var_list) {
218 if (var->weight <= 0)
221 //compute sigma_i + mu_i
223 for (i = 0; i < var->cnsts_number; i++) {
224 tmp += (var->cnsts[i].constraint)->lambda;
228 DEBUG3("\t Working on var (%p). cost = %e; Df = %e", var, tmp,
231 //uses the partial differential inverse function
232 tmp = var->func_fpi(var, tmp);
234 //computes the overall_error using normalized value
235 if (overall_error < (fabs(var->value - tmp))) {
236 overall_error = (fabs(var->value - tmp));
240 DEBUG3("New value of var (%p) = %e, overall_error = %e", var,
241 var->value, overall_error);
/* Non-warning KKT check each iteration; verbose check after the loop. */
245 if (!__check_kkt(cnst_list, var_list, 0))
247 DEBUG2("Iteration %d: Overall_error : %f", iteration, overall_error);
249 WARN1("Could not improve the convergence at iteration %d. Drop it!",iteration);
255 __check_kkt(cnst_list, var_list, 1);
257 if (overall_error <= epsilon_min_error) {
258 DEBUG1("The method converges in %d iterations.", iteration);
260 if (iteration >= max_iterations) {
262 ("Method reach %d iterations, which is the maximum number of iterations allowed.",
265 /* INFO1("Method converged after %d iterations", iteration); */
267 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
273 * Returns a double value corresponding to the result of a dichotomy process with
274 * respect to a given variable/constraint (\mu in the case of a variable or \lambda in
275 * case of a constraint) and an initial value init.
277 * @param init initial value for \mu or \lambda
278 * @param diff a function that computes the differential with respect to \mu or \lambda
279 * @param var_cnst a pointer to a variable or constraint
280 * @param min_error a minimum error tolerated
282 * @return a double corresponding to the result of the dichotomy process
284 double dichotomy(double init, double diff(double, void *), void *var_cnst,
/* Bisection (dichotomy) search for a zero of `diff` applied to the given
 * variable or constraint, starting from `init`, until the bracket width
 * falls below `min_error`.  Returns the midpoint of the final bracket.
 * NOTE(review): truncated listing -- the declarations/initialization of
 * `min`, `max`, `middle`, `diff_0` and `overall_error`, the bracket
 * doubling/halving assignments, and the early `return 0.0` body are on
 * missing lines.  The visible update sites (e.g. "Decreasing min")
 * imply min/max adjustments that are not shown here. */
288 double overall_error;
290 double min_diff, max_diff, middle_diff;
300 min_diff = max_diff = middle_diff = 0.0;
/* If the differential is already non-negative arbitrarily close to 0,
 * the optimum is at the lower boundary (return on a truncated line). */
303 if ((diff_0 = diff(1e-16, var_cnst)) >= 0) {
304 CDEBUG1(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)",
310 min_diff = diff(min, var_cnst);
311 max_diff = diff(max, var_cnst);
313 while (overall_error > min_error) {
314 CDEBUG4(surf_lagrange_dichotomy,
315 "min, max = %1.20f, %1.20f || diffmin, diffmax %1.20f, %1.20f", min, max,
/* Both positive: the root is below the bracket; shift it downward. */
318 if (min_diff > 0 && max_diff > 0) {
320 CDEBUG0(surf_lagrange_dichotomy, "Decreasing min");
322 min_diff = diff(min, var_cnst);
324 CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
/* Both negative: the root is above the bracket; shift it upward. */
329 } else if (min_diff < 0 && max_diff < 0) {
331 CDEBUG0(surf_lagrange_dichotomy, "Increasing max");
333 max_diff = diff(max, var_cnst);
335 CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
/* Proper bracket (sign change): classic bisection step. */
339 } else if (min_diff < 0 && max_diff > 0) {
340 middle = (max + min) / 2.0;
341 CDEBUG1(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f",middle);
/* Floating-point fixpoint reached: the midpoint equals an endpoint,
 * no further refinement is possible. */
343 if((min==middle) || (max==middle)) {
344 WARN0("Cannot improve the convergence!");
347 middle_diff = diff(middle, var_cnst);
349 if (middle_diff < 0) {
350 CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
352 min_diff = middle_diff;
353 } else if (middle_diff > 0) {
354 CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
356 max_diff = middle_diff;
/* Exact zero at an endpoint: collapse the bracket there. */
360 } else if (min_diff == 0) {
363 } else if (max_diff == 0) {
/* Inverted bracket should be impossible for a monotone differential. */
366 } else if (min_diff > 0 && max_diff < 0) {
367 CWARN0(surf_lagrange_dichotomy,
368 "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
371 CWARN2(surf_lagrange_dichotomy,
372 "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.",
378 CDEBUG1(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
380 return ((min + max) / 2.0);
386 double partial_diff_mu(double mu, void *param_var)
/* Differential of the dual function with respect to a variable's mu,
 * evaluated at `mu` (callback shape required by dichotomy()).
 * NOTE(review): truncated listing -- the declaration of `i`, the point
 * where the `mu` argument is folded into sigma_mu, the trace macros and
 * the return statement are not visible here.  In the visible lines `mu`
 * appears unused, so the truncated part presumably adds it to sigma_mu;
 * confirm against the complete file. */
388 double mu_partial = 0.0;
389 double sigma_mu = 0.0;
390 lmm_variable_t var = (lmm_variable_t) param_var;
/* Sum the lambdas of every constraint this variable participates in. */
394 for (i = 0; i < var->cnsts_number; i++)
395 sigma_mu += (var->cnsts[i].constraint)->lambda;
397 //compute sigma_i + mu_i
400 //use auxiliary function passing (sigma_i + mu_i)
401 mu_partial = diff_aux(var, sigma_mu);
404 mu_partial += var->bound;
413 double partial_diff_lambda(double lambda, void *param_cnst)
/* Differential of the dual function with respect to a constraint's
 * lambda, evaluated at `lambda` (callback shape required by
 * dichotomy()).  Sums diff_aux over every active variable crossing the
 * constraint, then adds the constraint's capacity bound.
 * NOTE(review): truncated listing -- the declaration of `i`, the reset
 * of sigma_i between variables, the mu_i addition mentioned by the
 * comment below, and the trace macros are on missing lines. */
417 xbt_swag_t elem_list = NULL;
418 lmm_element_t elem = NULL;
419 lmm_variable_t var = NULL;
420 lmm_constraint_t cnst = (lmm_constraint_t) param_cnst;
421 double lambda_partial = 0.0;
422 double sigma_i = 0.0;
425 elem_list = &(cnst->element_set);
427 CDEBUG1(surf_lagrange_dichotomy,"Computting diff of cnst (%p)", cnst);
429 xbt_swag_foreach(elem, elem_list) {
430 var = elem->variable;
/* Inactive variables do not contribute. */
431 if (var->weight <= 0)
434 //initialize the summation variable
437 //compute sigma_i of variable var
438 for (i = 0; i < var->cnsts_number; i++) {
439 sigma_i += (var->cnsts[i].constraint)->lambda;
442 //add mu_i if this flow has a RTT constraint associated
/* Substitute the candidate `lambda` for this constraint's current
 * lambda inside the sum. */
446 //replace value of cnst->lambda by the value of parameter lambda
447 sigma_i = (sigma_i - cnst->lambda) + lambda;
449 //use the auxiliary function passing (\sigma_i + \mu_i)
450 lambda_partial += diff_aux(var, sigma_i);
454 lambda_partial += cnst->bound;
457 /* CDEBUG1(surf_lagrange_dichotomy, "returning = %1.20f", lambda_partial); */
460 return lambda_partial;
464 double diff_aux(lmm_variable_t var, double x)
/* Helper shared by partial_diff_mu and partial_diff_lambda: evaluates a
 * term of the partial differential at `x` via the protocol's inverse
 * derivative func_fpi.
 * NOTE(review): truncated listing -- the computation of `result` from
 * `tmp_fpi`, the XBT_OUT trace and the return statement are not visible
 * here; confirm against the complete file. */
466 double tmp_fpi, result;
468 XBT_IN2("(var (%p), x (%1.20f))", var, x);
/* The protocol functions (func_fp / func_fpi / ...) must have been
 * attached to the variable at creation time. */
469 xbt_assert0(var->func_fp,
470 "Initialize the protocol functions first create variables before.");
472 tmp_fpi = var->func_fpi(var, x);
475 /* CDEBUG1(surf_lagrange_dichotomy, "returning %1.20f", result); */
481 /**************** Vegas and Reno functions *************************/
483 * NOTE for Reno: all functions consider the network
484 * coefficient (alpha) equal to 1.
488 * For Vegas f: $\alpha_f d_f \log\left(x_f\right)$
490 #define VEGAS_SCALING 1000.0
491 double func_vegas_f(lmm_variable_t var, double x){
492 return VEGAS_SCALING*var->df * log(x);
496 * For Vegas fp: $\frac{\alpha D_f}{x}$
498 double func_vegas_fp(lmm_variable_t var, double x){
499 //avoid a disaster value - c'est du bricolage mais ca marche
500 /* if(x == 0) x = 10e-8; */
501 return VEGAS_SCALING*var->df/x;
505 * For Vegas fpi: $\frac{\alpha D_f}{x}$
507 double func_vegas_fpi(lmm_variable_t var, double x){
508 //avoid a disaster value - c'est du bricolage mais ca marche
509 /* if(x == 0) x = 10e-8; */
510 return VEGAS_SCALING*var->df/x;
514 * For Vegas fpip: $-\frac{\alpha D_f}{x^2}$
516 double func_vegas_fpip(lmm_variable_t var, double x){
517 //avoid a disaster value - c'est du bricolage mais ca marche
518 /* if(x == 0) x = 10e-8; */
519 return -( VEGAS_SCALING*var->df/(x*x) ) ;
524 * For Reno f: $\frac{\sqrt{\frac{3}{2}}}{D_f} \arctan\left(\sqrt{\frac{3}{2}}x_f D_f\right)$
526 double func_reno_f(lmm_variable_t var, double x){
527 xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
528 // \sqrt{3/2} = 0.8164965808
529 return (0.8164965808 / var->df) * atan( (0.8164965808 / var->df)*x );
533 * For Reno fp: $\frac{3}{3 {D_f}^2 x^2 + 2}$
535 double func_reno_fp(lmm_variable_t var, double x){
536 return 3 / (3*var->df*var->df*x*x + 2);
540 * For Reno fpi: $\sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
542 double func_reno_fpi(lmm_variable_t var, double x){
545 xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
546 xbt_assert0(x>0.0,"Don't call me with stupid values!");
548 res_fpi = 1/(var->df*var->df*x) - 2/(3*var->df*var->df);
549 if(res_fpi<=0.0) return 0.0;
550 xbt_assert0(res_fpi>0.0,"Don't call me with stupid values!");
551 return sqrt(res_fpi);
555 * For Reno fpip: $-\frac{1}{2 {D_f}^2 x^2\sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}}$
557 double func_reno_fpip(lmm_variable_t var, double x){
559 double critical_test;
561 xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
562 xbt_assert0(x>0.0,"Don't call me with stupid values!");
564 res_fpip = 1/(var->df*var->df*x) - 2/(3*var->df*var->df);
565 xbt_assert0(res_fpip>0.0,"Don't call me with stupid values!");
566 critical_test = (2*var->df*var->df*x*x*sqrt(res_fpip));
568 return -(1.0/critical_test);