namespace kernel {
namespace lmm {
-double (*func_f_def)(const s_lmm_variable_t&, double);
-double (*func_fp_def)(const s_lmm_variable_t&, double);
-double (*func_fpi_def)(const s_lmm_variable_t&, double);
+double (*func_f_def)(const Variable&, double);
+double (*func_fp_def)(const Variable&, double);
+double (*func_fpi_def)(const Variable&, double);
/*
* Local prototypes to implement the Lagrangian optimization with optimal step, also called dichotomy.
// solves proportional fairness using a Lagrangian optimization with a dichotomy step
void lagrange_solve(lmm_system_t sys);
// computes the value of the dichotomy using an initial value, init, for a specific constraint
-static double dichotomy(double init, double diff(double, const s_lmm_constraint_t&), const s_lmm_constraint_t& cnst,
- double min_error);
+static double dichotomy(double init, double diff(double, const Constraint&), const Constraint& cnst, double min_error);
// computes the value of the differential of constraint cnst applied to lambda
-static double partial_diff_lambda(double lambda, const s_lmm_constraint_t& cnst);
+static double partial_diff_lambda(double lambda, const Constraint& cnst);
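// Overview (sketch): lagrange_solve() performs a coordinate-wise update of
// the Lagrange multipliers. Each iteration (1) refreshes every mu_i in
// closed form via new_mu(), (2) refreshes every lambda_j by running the
// dichotomy on partial_diff_lambda, and (3) recovers the primal values
// rho_i from lambda and mu via new_value(), looping until the overall
// modification drops below the convergence threshold.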
template <class CnstList, class VarList>
static int __check_feasible(const CnstList& cnst_list, const VarList& var_list, int warn)
{
- for (s_lmm_constraint_t const& cnst : cnst_list) {
+ for (Constraint const& cnst : cnst_list) {
double tmp = 0;
- for (s_lmm_element_t const& elem : cnst.enabled_element_set) {
+ for (Element const& elem : cnst.enabled_element_set) {
lmm_variable_t var = elem.variable;
xbt_assert(var->sharing_weight > 0);
tmp += var->value;
XBT_DEBUG("Checking feasability for constraint (%p): sat = %f, lambda = %f ", &cnst, tmp - cnst.bound, cnst.lambda);
}
- for (s_lmm_variable_t const& var : var_list) {
+ for (Variable const& var : var_list) {
if (not var.sharing_weight)
break;
if (var.bound < 0)
return 1;
}
-static double new_value(const s_lmm_variable_t& var)
+static double new_value(const Variable& var)
{
double tmp = 0;
- for (s_lmm_element_t const& elem : var.cnsts) {
+ for (Element const& elem : var.cnsts) {
tmp += elem.constraint->lambda;
}
if (var.bound > 0)
return var.func_fpi(var, tmp);
}
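// In closed form, new_value() computes the primal update
//   rho_i = f_i'^{-1}( sum_{j in cnsts(i)} lambda_j  (+ mu_i for bounded vars) ),
// and new_mu() below yields the multiplier update
//   mu_i = f_i'(b_i) - sum_{j in cnsts(i)} lambda_j.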
-static double new_mu(const s_lmm_variable_t& var)
+static double new_mu(const Variable& var)
{
double mu_i = 0.0;
double sigma_i = 0.0;
- for (s_lmm_element_t const& elem : var.cnsts) {
+ for (Element const& elem : var.cnsts) {
sigma_i += elem.constraint->lambda;
}
mu_i = var.func_fp(var, var.bound) - sigma_i;
{
double obj = 0.0;
- for (s_lmm_variable_t const& var : var_list) {
+ for (Variable const& var : var_list) {
double sigma_i = 0.0;
if (not var.sharing_weight)
break;
- for (s_lmm_element_t const& elem : var.cnsts)
+ for (Element const& elem : var.cnsts)
sigma_i += elem.constraint->lambda;
if (var.bound > 0)
obj += var.mu * var.bound;
}
- for (s_lmm_constraint_t const& cnst : cnst_list)
+ for (Constraint const& cnst : cnst_list)
obj += cnst.lambda * cnst.bound;
return obj;
/* Initialize lambda. */
auto& cnst_list = sys->active_constraint_set;
- for (s_lmm_constraint_t& cnst : cnst_list) {
+ for (Constraint& cnst : cnst_list) {
cnst.lambda = 1.0;
cnst.new_lambda = 2.0;
XBT_DEBUG("#### cnst(%p)->lambda : %e", &cnst, cnst.lambda);
 * Initialize var_list with only the active variables. Initialize mu.
*/
auto& var_list = sys->variable_set;
- for (s_lmm_variable_t& var : var_list) {
+ for (Variable& var : var_list) {
if (not var.sharing_weight)
var.value = 0.0;
else {
XBT_DEBUG("#### var(%p) ->mu : %e", &var, var.mu);
XBT_DEBUG("#### var(%p) ->weight: %e", &var, var.sharing_weight);
XBT_DEBUG("#### var(%p) ->bound: %e", &var, var.bound);
- auto weighted = std::find_if(begin(var.cnsts), end(var.cnsts),
- [](s_lmm_element_t const& x) { return x.consumption_weight != 0.0; });
+ auto weighted =
+ std::find_if(begin(var.cnsts), end(var.cnsts), [](Element const& x) { return x.consumption_weight != 0.0; });
if (weighted == end(var.cnsts))
var.value = 1.0;
}
XBT_DEBUG("-------------- Gradient Descent ----------");
/* Improve the value of mu_i */
- for (s_lmm_variable_t& var : var_list) {
+ for (Variable& var : var_list) {
if (var.sharing_weight && var.bound >= 0) {
XBT_DEBUG("Working on var (%p)", &var);
var.new_mu = new_mu(var);
}
/* Improve the value of lambda_i */
- for (s_lmm_constraint_t& cnst : cnst_list) {
+ for (Constraint& cnst : cnst_list) {
XBT_DEBUG("Working on cnst (%p)", &cnst);
cnst.new_lambda = dichotomy(cnst.lambda, partial_diff_lambda, cnst, dichotomy_min_error);
XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", &cnst, cnst.lambda, cnst.new_lambda);
/* Now compute the value of each variable (\rho) based on the values of \lambda and \mu. */
XBT_DEBUG("-------------- Check convergence ----------");
overall_modification = 0;
- for (s_lmm_variable_t& var : var_list) {
+ for (Variable& var : var_list) {
if (var.sharing_weight <= 0)
var.value = 0.0;
else {
*
* @return a double corresponding to the result of the dichotomy process
*/
-static double dichotomy(double init, double diff(double, const s_lmm_constraint_t&), const s_lmm_constraint_t& cnst,
- double min_error)
+static double dichotomy(double init, double diff(double, const Constraint&), const Constraint& cnst, double min_error)
{
double min = init;
double max = init;
return ((min + max) / 2.0);
}
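// A minimal standalone sketch of the same bracket-then-bisect idea on a plain
// function pointer (bisect_sketch and its parameters are illustrative names,
// not part of this file). It assumes init > 0 and a decreasing diff that
// changes sign, mirroring the setting in which dichotomy() above is used.
static double bisect_sketch(double init, double (*diff)(double), double min_error)
{
  double min = init;
  double max = init;
  while (diff(min) < 0.0) // move the lower bound down until diff(min) >= 0
    min /= 2.0;
  while (diff(max) > 0.0) // move the upper bound up until diff(max) <= 0
    max *= 2.0;
  while (max - min > min_error) { // plain bisection on the bracket
    double middle = (min + max) / 2.0;
    if (diff(middle) > 0.0)
      min = middle; // the root lies in the upper half
    else
      max = middle; // the root lies in the lower half
  }
  return (min + max) / 2.0;
}
// e.g. bisect_sketch(4.0, [](double x) { return 1.0 - x; }, 1e-9) converges to 1.0.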
-static double partial_diff_lambda(double lambda, const s_lmm_constraint_t& cnst)
+static double partial_diff_lambda(double lambda, const Constraint& cnst)
{
double diff = 0.0;
XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", &cnst);
- for (s_lmm_element_t const& elem : cnst.enabled_element_set) {
- s_lmm_variable_t& var = *elem.variable;
+ for (Element const& elem : cnst.enabled_element_set) {
+ Variable& var = *elem.variable;
xbt_assert(var.sharing_weight > 0);
XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", &var);
// Initialize the summation variable
double sigma_i = 0.0;
// Compute sigma_i
- for (s_lmm_element_t const& elem2 : var.cnsts)
+ for (Element const& elem2 : var.cnsts)
sigma_i += elem2.constraint->lambda;
// add mu_i if this flow has an RTT constraint associated with it
* programming.
*
*/
-void lmm_set_default_protocol_function(double (*func_f)(const s_lmm_variable_t& var, double x),
- double (*func_fp)(const s_lmm_variable_t& var, double x),
- double (*func_fpi)(const s_lmm_variable_t& var, double x))
+void set_default_protocol_function(double (*func_f)(const Variable& var, double x),
+ double (*func_fp)(const Variable& var, double x),
+ double (*func_fpi)(const Variable& var, double x))
{
func_f_def = func_f;
func_fp_def = func_fp;
* Therefore: $fp(x) = \frac{\alpha D_f}{x}$
* Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
*/
-double func_vegas_f(const s_lmm_variable_t& var, double x)
+double func_vegas_f(const Variable& var, double x)
{
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return VEGAS_SCALING * var.sharing_weight * log(x);
}
-double func_vegas_fp(const s_lmm_variable_t& var, double x)
+double func_vegas_fp(const Variable& var, double x)
{
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return VEGAS_SCALING * var.sharing_weight / x;
}
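// Sanity check of the closed forms above (write alpha for VEGAS_SCALING and
// D_f for var.sharing_weight): fp(x) = alpha * D_f / x is its own inverse,
// since solving alpha * D_f / y = x gives y = alpha * D_f / x; e.g. with
// alpha * D_f = 2000, fp(4) = 500 and fpi(500) = 4. Passing func_vegas_f,
// func_vegas_fp and func_vegas_fpi (below) to set_default_protocol_function()
// selects Vegas-style fairness.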
-double func_vegas_fpi(const s_lmm_variable_t& var, double x)
+double func_vegas_fpi(const Variable& var, double x)
{
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return var.sharing_weight / (x / VEGAS_SCALING);
* Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2+2}$
* Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
*/
-double func_reno_f(const s_lmm_variable_t& var, double x)
+double func_reno_f(const Variable& var, double x)
{
xbt_assert(var.sharing_weight > 0.0, "Don't call me with stupid values!");
return RENO_SCALING * sqrt(3.0 / 2.0) / var.sharing_weight * atan(sqrt(3.0 / 2.0) * var.sharing_weight * x);
}
-double func_reno_fp(const s_lmm_variable_t& var, double x)
+double func_reno_fp(const Variable& var, double x)
{
return RENO_SCALING * 3.0 / (3.0 * var.sharing_weight * var.sharing_weight * x * x + 2.0);
}
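// Consistency check (D_f = var.sharing_weight, RENO_SCALING omitted):
//   d/dx [ sqrt(3/2)/D_f * atan(sqrt(3/2) * D_f * x) ]
//     = (3/2) / (1 + (3/2) * D_f^2 * x^2)
//     = 3 / (3 * D_f^2 * x^2 + 2),
// which is exactly what func_reno_fp() returns.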
-double func_reno_fpi(const s_lmm_variable_t& var, double x)
+double func_reno_fpi(const Variable& var, double x)
{
double res_fpi;
 * Therefore: $fp(x) = \frac{3}{D_f x (2 D_f x + 3)}$
 * Therefore: $fpi(x) = \frac{\sqrt{9 + 24/x} - 3}{4 D_f}$
*/
-double func_reno2_f(const s_lmm_variable_t& var, double x)
+double func_reno2_f(const Variable& var, double x)
{
xbt_assert(var.sharing_weight > 0.0, "Don't call me with stupid values!");
return RENO2_SCALING * (1.0 / var.sharing_weight) *
log((x * var.sharing_weight) / (2.0 * x * var.sharing_weight + 3.0));
}
-double func_reno2_fp(const s_lmm_variable_t& var, double x)
+double func_reno2_fp(const Variable& var, double x)
{
return RENO2_SCALING * 3.0 / (var.sharing_weight * x * (2.0 * var.sharing_weight * x + 3.0));
}
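// Consistency check (D_f = var.sharing_weight, RENO2_SCALING omitted):
// since f(x) = (1/D_f) * (ln(D_f * x) - ln(2 * D_f * x + 3)),
//   f'(x) = (1/D_f) * (1/x - 2 * D_f / (2 * D_f * x + 3))
//         = 3 / (D_f * x * (2 * D_f * x + 3)),
// which matches func_reno2_fp() and, once inverted, the fpi formula above.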
-double func_reno2_fpi(const s_lmm_variable_t& var, double x)
+double func_reno2_fpi(const Variable& var, double x)
{
xbt_assert(x > 0.0, "Don't call me with stupid values!");
double tmp = x * var.sharing_weight * var.sharing_weight;