2 /* Copyright (c) 2007 Arnaud Legrand, Pedro Velho. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 * Modelling the proportional fairness using the Lagrange Optimization
7 * Approach. For a detailed description see:
8 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
11 #include "xbt/sysdep.h"
12 #include "xbt/mallocator.h"
13 #include "maxmin_private.h"
/* Scaling factor used by the Vegas fpi function below (func_vegas_fpi);
 * presumably keeps the computed rates in a convenient numeric range --
 * TODO confirm against the matching Vegas f/fp functions (not visible here). */
20 #define VEGAS_SCALING 1000.0

/* Two log channels: one for the overall Lagrangian solver, and a finer
 * grained one for the inner dichotomy (bisection) search. */
23 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf,
24 "Logging specific to SURF (lagrange)");
25 XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf,
26 "Logging specific to SURF (lagrange dichotomy)");

/* Forward declarations of the local helpers implementing the Lagrangian
 * optimization with optimal step ("dichotomy").
 * NOTE(review): the dichotomy() prototype below is truncated in this chunk;
 * its trailing parameter (the minimum tolerated error, see the definition
 * further down) is on a line that is not visible here. */
29 * Local prototypes to implement the lagrangian optimization with optimal step, also called dichotomy.
31 //solves the proportional fairness using a lagrange optimizition with dichotomy step
32 void lagrange_solve(lmm_system_t sys);
33 //computes the value of the dichotomy using a initial values, init, with a specific variable or constraint
34 double dichotomy(double init, double diff(double, void *), void *var_cnst,
36 //computes the value of the differential of variable param_var applied to mu
37 double partial_diff_mu(double mu, void *param_var);
38 //computes the value of the differential of constraint param_cnst applied to lambda
39 double partial_diff_lambda(double lambda, void *param_cnst);
40 //auxiliar function to compute the partial_diff
41 double diff_aux(lmm_variable_t var, double x);
/* Sanity-check the KKT (Karush-Kuhn-Tucker) conditions on the current state:
 * no constraint (link) may exceed its bound, and no bounded variable (flow)
 * may exceed its own bound.
 *
 * NOTE(review): this chunk is incomplete -- the computation of `tmp`
 * (constraint usage), the actual warning emission, several closing braces
 * and the return statement are on lines not visible here. Comments below
 * describe only what is shown.
 *
 * cnst_list: active constraints to verify.
 * var_list:  variables to verify.
 * warn:      presumably enables warning output on violation -- TODO confirm.
 * Returns:   nonzero when the KKT conditions hold, judging by the caller's
 *            `if (!__check_kkt(...))` usage -- return not visible here. */
44 static int __check_kkt(xbt_swag_t cnst_list, xbt_swag_t var_list, int warn)
46 xbt_swag_t elem_list = NULL;
47 lmm_element_t elem = NULL;
48 lmm_constraint_t cnst = NULL;
49 lmm_variable_t var = NULL;
53 //verify the KKT property for each link
54 xbt_swag_foreach(cnst, cnst_list) {
56 elem_list = &(cnst->element_set);
57 xbt_swag_foreach(elem, elem_list) {
/* `tmp` accumulates the usage of this constraint; its computation is on
 * lines not visible in this chunk. */
64 if (double_positive(tmp - cnst->bound)) {
/* Capacity violation: the constraint is used beyond its bound. */
67 ("The link (%p) is over-used. Expected less than %f and got %f",
68 cnst, cnst->bound, tmp);
71 DEBUG3("Checking KKT for constraint (%p): sat = %f, lambda = %f ",
72 cnst, tmp - cnst->bound, cnst->lambda);
75 //verify the KKT property of each flow
76 xbt_swag_foreach(var, var_list) {
/* Boundless (bound < 0) or inactive (weight <= 0) variables are skipped;
 * the skip statement itself is not visible in this chunk. */
77 if (var->bound < 0 || var->weight <= 0)
79 DEBUG3("Checking KKT for variable (%p): sat = %f mu = %f", var,
80 var->value - var->bound, var->mu);
/* Bound violation: the variable's value exceeds its upper bound. */
82 if (double_positive(var->value - var->bound)) {
85 ("The variable (%p) is too large. Expected less than %f and got %f",
86 var, var->bound, var->value);
/* Solve the proportional-fairness sharing problem on `sys` via Lagrangian
 * dual optimization: alternately update the dual multipliers mu_i (one per
 * bounded variable) and lambda_i (one per active constraint) with a
 * dichotomy line search, then recompute the primal values through each
 * variable's inverse-differential function (func_fpi) until the relative
 * error drops below epsilon_min_error or max_iterations is reached.
 *
 * NOTE(review): this chunk is incomplete -- declarations of `i`,
 * `iteration`, `tmp`, `dual_updated`, several initializations, closing
 * braces and the final trace dump are on lines not visible here. */
93 void lagrange_solve(lmm_system_t sys)
/* Solver tuning knobs: iteration cap and the two error tolerances
 * (outer convergence test, and inner dichotomy stop criterion). */
98 int max_iterations = 100;
99 double epsilon_min_error = 1e-6;
100 double dichotomy_min_error = 1e-20;
101 double overall_error = 1;
104 * Variables to manipulate the data structure proposed to model the maxmin
105 * fairness. See docummentation for more details.
107 xbt_swag_t cnst_list = NULL;
108 lmm_constraint_t cnst = NULL;
110 xbt_swag_t var_list = NULL;
111 lmm_variable_t var = NULL;
114 * Auxiliar variables.
/* Dump the configuration so a debug run is self-describing. */
121 DEBUG0("Iterative method configuration snapshot =====>");
122 DEBUG1("#### Maximum number of iterations : %d", max_iterations);
123 DEBUG1("#### Minimum error tolerated : %e",
125 DEBUG1("#### Minimum error tolerated (dichotomy) : %e",
126 dichotomy_min_error);
/* Nothing changed since the last solve: presumably returns early here --
 * the statement itself is not visible in this chunk. */
128 if (!(sys->modified))
132 * Initialize the var list variable with only the active variables.
133 * Associate an index in the swag variables. Initialize mu.
135 var_list = &(sys->variable_set);
137 xbt_swag_foreach(var, var_list) {
138 if ((var->bound < 0.0) || (var->weight <= 0.0)) {
139 DEBUG1("#### NOTE var(%d) is a boundless (or inactive) variable", i);
145 DEBUG3("#### var(%d) %p ->mu : %e", i, var, var->mu);
146 DEBUG3("#### var(%d) %p ->weight: %e", i, var, var->weight);
147 DEBUG3("#### var(%d) %p ->bound: %e", i, var, var->bound);
/* Initialize the dual multipliers of the active constraints; lambda's own
 * initialization is on a line not visible in this chunk. */
154 cnst_list = &(sys->active_constraint_set);
155 xbt_swag_foreach(cnst, cnst_list) {
157 cnst->new_lambda = 2.0;
158 DEBUG2("#### cnst(%p)->lambda : %e", cnst, cnst->lambda);
162 * While doesn't reach a minimun error or a number maximum of iterations.
164 while (overall_error > epsilon_min_error && iteration < max_iterations) {
168 DEBUG1("************** ITERATION %d **************", iteration);
169 DEBUG0("-------------- Gradient Descent ----------");
171 * Compute the value of mu_i
173 //forall mu_i in mu_1, mu_2, ..., mu_n
174 xbt_swag_foreach(var, var_list) {
/* Only bounded, active variables carry a mu multiplier. */
175 if ((var->bound >= 0) && (var->weight > 0)) {
176 DEBUG1("Working on var (%p)", var);
/* new_mu is found by a dichotomy on the partial differential w.r.t. mu;
 * the assignment target (var->new_mu =) is on a line not visible here. */
178 dichotomy(var->mu, partial_diff_mu, var, dichotomy_min_error);
/* dual_updated counts multipliers that actually moved this iteration. */
179 dual_updated += (fabs(var->new_mu-var->mu)>dichotomy_min_error);
180 DEBUG2("dual_updated (%d) : %1.20f",dual_updated,fabs(var->new_mu-var->mu));
181 DEBUG3("Updating mu : var->mu (%p) : %1.20f -> %1.20f", var, var->mu, var->new_mu);
182 var->mu = var->new_mu;
187 * Compute the value of lambda_i
189 //forall lambda_i in lambda_1, lambda_2, ..., lambda_n
190 xbt_swag_foreach(cnst, cnst_list) {
191 DEBUG1("Working on cnst (%p)", cnst);
/* Same dichotomy update for each constraint's lambda (assignment target
 * cnst->new_lambda = is on a line not visible here). */
193 dichotomy(cnst->lambda, partial_diff_lambda, cnst,
194 dichotomy_min_error);
195 dual_updated += (fabs(cnst->new_lambda-cnst->lambda)>dichotomy_min_error);
196 DEBUG2("dual_updated (%d) : %1.20f",dual_updated,fabs(cnst->new_lambda-cnst->lambda));
197 DEBUG3("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", cnst, cnst->lambda, cnst->new_lambda);
198 cnst->lambda = cnst->new_lambda;
202 * Now computes the values of each variable (\rho) based on
203 * the values of \lambda and \mu.
205 DEBUG0("-------------- Check convergence ----------");
207 xbt_swag_foreach(var, var_list) {
208 if (var->weight <= 0)
211 //compute sigma_i + mu_i
/* Sum the lambdas of every constraint this variable participates in
 * (`tmp` is presumably reset before this loop -- line not visible). */
213 for (i = 0; i < var->cnsts_number; i++) {
214 tmp += (var->cnsts[i].constraint)->lambda;
218 DEBUG3("\t Working on var (%p). cost = %e; Df = %e", var, tmp,
221 //uses the partial differential inverse function
222 tmp = var->func_fpi(var, tmp);
224 //computes de overall_error using normalized value
/* Track the worst per-variable error: relative when tmp != 0,
 * absolute otherwise (the selecting condition is not visible here). */
225 if (overall_error < (fabs(var->value - tmp)/tmp)) {
226 overall_error = (fabs(var->value - tmp)/tmp);
229 if (overall_error < (fabs(var->value - tmp))) {
230 overall_error = (fabs(var->value - tmp));
234 DEBUG3("New value of var (%p) = %e, overall_error = %e", var,
235 var->value, overall_error);
/* KKT check without warnings: presumably forces another iteration when
 * the conditions do not hold yet -- TODO confirm (statement not visible). */
239 if (!__check_kkt(cnst_list, var_list, 0))
241 DEBUG2("Iteration %d: Overall_error : %f", iteration, overall_error);
/* No dual variable moved more than the dichotomy tolerance: the method
 * cannot make further progress, so the loop is abandoned. */
243 DEBUG1("Could not improve the convergence at iteration %d. Drop it!",iteration);
/* Final KKT check, this time with warnings enabled. */
249 __check_kkt(cnst_list, var_list, 1);
251 if (overall_error <= epsilon_min_error) {
252 DEBUG1("The method converges in %d iterations.", iteration);
254 if (iteration >= max_iterations) {
256 ("Method reach %d iterations, which is the maximum number of iterations allowed.",
259 /* INFO1("Method converged after %d iterations", iteration); */
/* Dump the final state only when debug logging is enabled (avoids the
 * cost of formatting otherwise). Body not visible in this chunk. */
261 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
267 * Returns a double value corresponding to the result of a dichotomy proccess with
268 * respect to a given variable/constraint (\mu in the case of a variable or \lambda in
269 * case of a constraint) and a initial value init.
271 * @param init initial value for \mu or \lambda
272 * @param diff a function that computes the differential of with respect a \mu or \lambda
273 * @param var_cnst a pointer to a variable or constraint
274 * @param min_erro a minimun error tolerated
276 * @return a double correponding to the result of the dichotomyal process
/* Root-finding by bisection on diff(). The bracket [min, max] is first
 * grown/shrunk until it straddles a sign change, then repeatedly halved.
 * NOTE(review): this chunk is incomplete -- the declarations/initialization
 * of min, max, middle, diff_0 and overall_error, the bracket doubling/
 * halving statements, and several closing braces are on lines not visible
 * here; comments below describe only what is shown. */
278 double dichotomy(double init, double diff(double, void *), void *var_cnst,
282 double overall_error;
284 double min_diff, max_diff, middle_diff;
294 min_diff = max_diff = middle_diff = 0.0;
/* If the differential is already >= 0 arbitrarily close to zero, the
 * root is at (or below) zero: the multiplier is clamped to 0.0. */
297 if ((diff_0 = diff(1e-16, var_cnst)) >= 0) {
298 CDEBUG1(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)",
/* Evaluate the differential at both ends of the initial bracket. */
304 min_diff = diff(min, var_cnst);
305 max_diff = diff(max, var_cnst);
307 while (overall_error > min_error) {
308 CDEBUG4(surf_lagrange_dichotomy,
309 "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f", min, max,
/* Both ends positive: the root lies below the bracket; move it down
 * (adjustment statements not visible in this chunk). */
312 if (min_diff > 0 && max_diff > 0) {
314 CDEBUG0(surf_lagrange_dichotomy, "Decreasing min");
316 min_diff = diff(min, var_cnst);
318 CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
/* Both ends negative: the root lies above the bracket; move it up. */
323 } else if (min_diff < 0 && max_diff < 0) {
325 CDEBUG0(surf_lagrange_dichotomy, "Increasing max");
327 max_diff = diff(max, var_cnst);
329 CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
/* Proper sign change: classic bisection step on the midpoint. */
333 } else if (min_diff < 0 && max_diff > 0) {
334 middle = (max + min) / 2.0;
335 CDEBUG1(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f",middle);
/* The midpoint equals an endpoint: floating-point resolution exhausted,
 * no further improvement is possible. */
337 if((min==middle) || (max==middle)) {
338 DEBUG0("Cannot improve the convergence!");
341 middle_diff = diff(middle, var_cnst);
343 if (middle_diff < 0) {
344 CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
346 min_diff = middle_diff;
347 } else if (middle_diff > 0) {
348 CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
350 max_diff = middle_diff;
/* Exact zero at an endpoint: that endpoint is the root. */
354 } else if (min_diff == 0) {
357 } else if (max_diff == 0) {
/* min_diff > 0 && max_diff < 0 should be impossible for the (presumably
 * monotone) differentials used here -- TODO confirm monotonicity. */
360 } else if (min_diff > 0 && max_diff < 0) {
361 CWARN0(surf_lagrange_dichotomy,
362 "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
/* Remaining case: NaN in min_diff/max_diff ends up here. */
365 CWARN2(surf_lagrange_dichotomy,
366 "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.",
/* Result: the midpoint of the final bracket. */
372 CDEBUG1(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
374 return ((min + max) / 2.0);
/* Partial differential of the dual objective with respect to a variable's
 * mu multiplier, evaluated at `mu`. Used as the diff() callback of
 * dichotomy() for variables.
 *
 * NOTE(review): this chunk is incomplete -- `mu` itself is not used in any
 * visible line (presumably added into sigma_mu on a missing line), the
 * declaration of `i`, any sign adjustment, and the return statement are
 * not visible here. */
380 double partial_diff_mu(double mu, void *param_var)
382 double mu_partial = 0.0;
383 double sigma_mu = 0.0;
384 lmm_variable_t var = (lmm_variable_t) param_var;
/* Sum the lambdas of every constraint this variable crosses. */
388 for (i = 0; i < var->cnsts_number; i++)
389 sigma_mu += (var->cnsts[i].constraint)->lambda;
391 //compute sigma_i + mu_i
394 //use auxiliar function passing (sigma_i + mu_i)
395 mu_partial = diff_aux(var, sigma_mu);
/* The variable's bound contributes linearly to the differential. */
398 mu_partial += var->bound;
/* Partial differential of the dual objective with respect to a constraint's
 * lambda multiplier, evaluated at `lambda`. Used as the diff() callback of
 * dichotomy() for constraints: sums diff_aux() over every active variable
 * crossing the constraint, substituting `lambda` for the constraint's
 * stored lambda.
 *
 * NOTE(review): this chunk is incomplete -- the declaration of `i`, the
 * reset of sigma_i, the mu_i addition, any sign adjustment and closing
 * braces are on lines not visible here. */
407 double partial_diff_lambda(double lambda, void *param_cnst)
411 xbt_swag_t elem_list = NULL;
412 lmm_element_t elem = NULL;
413 lmm_variable_t var = NULL;
414 lmm_constraint_t cnst = (lmm_constraint_t) param_cnst;
415 double lambda_partial = 0.0;
416 double sigma_i = 0.0;
419 elem_list = &(cnst->element_set);
421 CDEBUG1(surf_lagrange_dichotomy,"Computting diff of cnst (%p)", cnst);
423 xbt_swag_foreach(elem, elem_list) {
424 var = elem->variable;
/* Inactive variables do not contribute (skip statement not visible). */
425 if (var->weight <= 0)
428 //initilize de sumation variable
431 //compute sigma_i of variable var
432 for (i = 0; i < var->cnsts_number; i++) {
433 sigma_i += (var->cnsts[i].constraint)->lambda;
436 //add mu_i if this flow has a RTT constraint associated
440 //replace value of cnst->lambda by the value of parameter lambda
/* Swap the stored lambda for the candidate value being evaluated. */
441 sigma_i = (sigma_i - cnst->lambda) + lambda;
443 //use the auxiliar function passing (\sigma_i + \mu_i)
444 lambda_partial += diff_aux(var, sigma_i);
/* The constraint's capacity contributes linearly to the differential. */
448 lambda_partial += cnst->bound;
451 return lambda_partial;
/* Helper shared by partial_diff_mu() and partial_diff_lambda(): evaluates
 * the variable's inverse-differential function func_fpi at x and derives
 * the differential contribution from it.
 *
 * NOTE(review): this chunk is incomplete -- the computation of `result`
 * from `tmp_fpi`, the XBT_OUT trace and the return statement are on lines
 * not visible here. */
455 double diff_aux(lmm_variable_t var, double x)
457 double tmp_fpi, result;
459 XBT_IN2("(var (%p), x (%1.20f))", var, x);
/* The protocol functions (func_fp/func_fpi) must have been installed on
 * the variable before the solver runs. */
460 xbt_assert0(var->func_fp,
461 "Initialize the protocol functions first create variables before.");
463 tmp_fpi = var->func_fpi(var, x);
471 /**************** Vegas and Reno functions *************************/
473 * NOTE for Reno: all functions consider the network
474 * coeficient (alpha) equal to 1.
478 * For Vegas fpi: $\frac{\alpha D_f}{x}$
/* Inverse of the partial differential of the Vegas utility function:
 * fpi(x) = VEGAS_SCALING * D_f / x, where D_f is the variable's df field
 * (presumably the flow's RTT/delay factor -- TODO confirm).
 * Asserts x > 0 since x is used as a divisor.
 * NOTE(review): the closing brace of this function is on a line not
 * visible in this chunk. */
480 double func_vegas_fpi(lmm_variable_t var, double x){
481 xbt_assert0(x>0.0,"Don't call me with stupid values!");
482 return VEGAS_SCALING*var->df/x;
486 * For Reno fpi: $\sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
/* Inverse of the partial differential of the Reno utility function.
 * Computes the radicand 1/(D_f^2 x) - 2/(3 D_f^2) and clamps negative or
 * zero results to 0.0 before taking the square root.
 * Asserts D_f > 0 and x > 0 since both appear as divisors.
 * NOTE(review): the declaration of res_fpi and the closing brace are on
 * lines not visible in this chunk. */
488 double func_reno_fpi(lmm_variable_t var, double x){
491 xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
492 xbt_assert0(x>0.0,"Don't call me with stupid values!");
494 res_fpi = 1/(var->df*var->df*x) - 2/(3*var->df*var->df);
495 if(res_fpi<=0.0) return 0.0;
/* NOTE(review): this assert is unreachable -- the clamp on the previous
 * line already returned for every res_fpi <= 0.0. Candidate for removal. */
496 xbt_assert0(res_fpi>0.0,"Don't call me with stupid values!");
497 return sqrt(res_fpi);