Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Move fast dual method from MICOM #252

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

Next Release
------------
* move the fast_dual function from MICOM back here

1.8.2
-----
Expand Down Expand Up @@ -33,7 +34,6 @@ Next Release
* Tests are run for sympy and symengine now.
* Updated support Python versions to >=3.8.


1.6.1
-----
* fix the Gurobi version check to allow 10.0
Expand Down
195 changes: 191 additions & 4 deletions src/optlang/duality.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import optlang
from . import symbolics as S
import logging

# Use getLogger (not the Logger constructor) so the logger is registered in the
# logging hierarchy and respects handlers/levels configured by the application.
logger = logging.getLogger(__name__)


# This function is very complex. Should maybe be refactored
def convert_linear_problem_to_dual(model, sloppy=False, infinity=None, maintain_standard_form=True, prefix="dual_", dual_model=None): # NOQA
"""
"""Convert an LP to its dual form.

A mathematical optimization problem can be viewed as a primal and a dual problem. If the primal problem is
a minimization problem the dual is a maximization problem, and the optimal value of the dual is a lower bound of
the optimal value of the primal.
Expand Down Expand Up @@ -134,7 +139,7 @@ def convert_linear_problem_to_dual(model, sloppy=False, infinity=None, maintain_
# Add dual constraints from primal objective
primal_objective_dict = model.objective.expression.as_coefficients_dict()
for variable in model.variables:
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in coefficients[variable.name].items()])
expr = S.add([(coef * dual_var) for dual_var, coef in coefficients[variable.name].items()])
obj_coef = primal_objective_dict[variable]
if maximization:
const = model.interface.Constraint(expr, lb=obj_coef, name=prefix + variable.name)
Expand All @@ -143,11 +148,193 @@ def convert_linear_problem_to_dual(model, sloppy=False, infinity=None, maintain_
dual_model.add(const)

# Make dual objective
expr = optlang.symbolics.add([(coef * dual_var) for dual_var, coef in dual_objective.items() if coef != 0])
expr = S.add([(coef * dual_var) for dual_var, coef in dual_objective.items() if coef != 0])
if maximization:
objective = model.interface.Objective(expr, direction="min")
else:
objective = model.interface.Objective(expr, direction="max")
dual_model.objective = objective

return dual_model


def fast_dual(model, prefix="dual_"):
    """Add dual formulation to the problem.

    A mathematical optimization problem can be viewed as a primal and a dual
    problem. If the primal problem is a minimization problem the dual is a
    maximization problem, and the optimal value of the dual is a lower bound of
    the optimal value of the primal. For linear problems, strong duality holds,
    which means that the optimal values of the primal and dual are equal
    (duality gap = 0). This function takes an optlang Model representing a
    primal linear problem and adds in the dual formulation directly, creating a
    primal/dual problem.

    The provided model must have a linear objective, linear constraints and only
    continuous variables. Furthermore, the problem must be in standard form,
    i.e. all variables should be non-negative. Both minimization and
    maximization problems are allowed.

    This will be faster than `convert_linear_problem_to_dual` and will only return
    the dual objective coefficients of the primal/dual problem. It is meant to be
    used in multiple objective optimization where multiple primal objectives are
    added as dual constraints to the primal/dual problem.

    Parameters
    ----------
    model : optlang.Model
        The model to be dualized. It is modified in place: the dual variables
        and the dual constraints derived from the primal objective are added
        to it.
    prefix : str
        The string that will be prepended to all variable and constraint names
        in the returned dual problem.

    Returns
    -------
    dict
        The coefficients for the new dual objective, mapping dual variables to
        their objective coefficients.

    Raises
    ------
    ValueError
        If the model has non-linear constraints, integer variables, or
        variables that may take negative values (i.e. is not in standard
        form).

    """
    logger.debug("adding dual variables")
    if len(model.variables) > 1e5:
        logger.warning(
            "the model has a lot of variables, "
            "dual optimization will be extremely slow :O"
        )

    prob = model.interface
    maximization = model.objective.direction == "max"

    # Dualizing a maximization problem flips all signs relative to a
    # minimization problem.
    if maximization:
        sign = 1
    else:
        sign = -1

    coefficients = {}
    dual_objective = {}
    to_add = []

    # Add dual variables from primal constraints:
    for constraint in model.constraints:
        if constraint.expression == 0:
            continue  # Skip empty constraint
        if not constraint.is_Linear:
            raise ValueError(
                "Non-linear problems are not supported: " + str(constraint)
            )
        if constraint.lb is None and constraint.ub is None:
            logger.debug("skipped free constraint %s", constraint.name)
            continue  # Skip free constraint
        if constraint.lb == constraint.ub:
            # Equality constraint -> a single unbounded (free) dual variable.
            const_var = prob.Variable(
                prefix + constraint.name + "_constraint", lb=None, ub=None
            )
            to_add.append(const_var)
            if constraint.lb != 0:
                dual_objective[const_var.name] = sign * constraint.lb
            coefs = constraint.get_linear_coefficients(constraint.variables)
            for variable, coef in coefs.items():
                coefficients.setdefault(variable.name, {})[const_var.name] = (
                    sign * coef
                )
        else:
            # Inequality constraint -> up to two non-negative dual variables,
            # one for each finite bound.
            if constraint.lb is not None:
                lb_var = prob.Variable(
                    prefix + constraint.name + "_constraint_lb", lb=0, ub=None
                )
                to_add.append(lb_var)
                if constraint.lb != 0:
                    dual_objective[lb_var.name] = -sign * constraint.lb
            if constraint.ub is not None:
                ub_var = prob.Variable(
                    prefix + constraint.name + "_constraint_ub", lb=0, ub=None
                )
                to_add.append(ub_var)
                if constraint.ub != 0:
                    dual_objective[ub_var.name] = sign * constraint.ub

            if not (
                constraint.expression.is_Add or constraint.expression.is_Mul
            ):
                raise ValueError(
                    "Invalid expression type: " + str(type(constraint.expression))
                )
            if constraint.expression.is_Add:
                coefficients_dict = constraint.get_linear_coefficients(
                    constraint.variables
                )
            else:  # constraint.expression.is_Mul:
                # A single-term expression: args are (coefficient, variable).
                args = constraint.expression.args
                coefficients_dict = {args[1]: args[0]}

            for variable, coef in coefficients_dict.items():
                if constraint.lb is not None:
                    coefficients.setdefault(variable.name, {})[lb_var.name] = (
                        -sign * coef
                    )
                if constraint.ub is not None:
                    coefficients.setdefault(variable.name, {})[ub_var.name] = (
                        sign * coef
                    )

    # Add dual variables from primal bounds
    for variable in model.variables:
        if not variable.type == "continuous":
            raise ValueError(
                "Integer variables are not supported: " + str(variable)
            )
        # Standard form requires every variable to be non-negative. A missing
        # lower bound (lb=None) also allows negative values, and previously
        # fell through to the `variable.lb > 0` comparison below, raising an
        # uninformative TypeError.
        if variable.lb is None or variable.lb < 0:
            raise ValueError(
                "Problem is not in standard form ("
                + variable.name
                + " can be negative)"
            )
        if variable.lb > 0:
            bound_var = prob.Variable(
                prefix + variable.name + "_lb", lb=0, ub=None
            )
            to_add.append(bound_var)
            coefficients.setdefault(variable.name, {})[bound_var.name] = -sign
            dual_objective[bound_var.name] = -sign * variable.lb
        if variable.ub is not None:
            bound_var = prob.Variable(
                prefix + variable.name + "_ub", lb=0, ub=None
            )
            to_add.append(bound_var)
            coefficients.setdefault(variable.name, {})[bound_var.name] = sign
            if variable.ub != 0:
                dual_objective[bound_var.name] = sign * variable.ub

    model.add(to_add)

    # Add dual constraints from primal objective
    primal_objective_dict = model.objective.get_linear_coefficients(
        model.objective.variables
    )
    for variable in model.objective.variables:
        obj_coef = primal_objective_dict[variable]
        if maximization:
            const = prob.Constraint(
                S.Zero, lb=obj_coef, name=prefix + variable.name
            )
        else:
            const = prob.Constraint(
                S.Zero, ub=obj_coef, name=prefix + variable.name
            )
        model.add([const])
        model.update()
        coefs = {
            model.variables[vid]: coef
            for vid, coef in coefficients[variable.name].items()
        }
        const.set_linear_coefficients(coefs)

    # Make dual objective
    coefs = {
        model.variables[vid]: coef
        for vid, coef in dual_objective.items()
        if coef != 0
    }
    logger.info("dual model has %d terms in objective", len(coefs))

    return coefs

17 changes: 16 additions & 1 deletion src/optlang/tests/test_duality.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,9 @@
import unittest

import optlang
from optlang.duality import convert_linear_problem_to_dual
from optlang.duality import convert_linear_problem_to_dual, fast_dual
from optlang.glpk_interface import Constraint, Model, Objective, Variable
from optlang.symbolics import Zero


class DualityTestCase(unittest.TestCase):
Expand Down Expand Up @@ -193,3 +194,17 @@ def test_explicit_model(self):

self.assertEqual(self.model.objective.value, 31)
self.assertEqual(dual.objective.value, 31)

def test_fast_dual(self):
    """Verify strong duality: the dual optimum equals the primal optimum."""
    model = self.model
    self.assertEqual(model.optimize(), optlang.interface.OPTIMAL)
    primal_res = model.objective.value
    # Dualize in place and re-optimize with the returned dual objective.
    dual_coefs = fast_dual(model)
    dual_obj = model.interface.Objective(Zero, direction="min")
    model.objective = dual_obj
    model.objective.set_linear_coefficients(dual_coefs)
    self.assertEqual(model.optimize(), optlang.interface.OPTIMAL)
    dual_res = model.objective.value
    # Solver objective values are floats; exact equality is fragile, so
    # compare with a tolerance.
    self.assertAlmostEqual(primal_res, dual_res, places=6)


Loading