Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
123 changes: 123 additions & 0 deletions gtsam/constrained/AugmentedLagrangian.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file AugmentedLagrangian.cpp
* @brief Augmented Lagrangian method for nonlinear constrained optimization.
* @author Yetong Zhang
* @date Aug 3, 2024
*/

#include <gtsam/constrained/AugmentedLagrangian.h>
#include <gtsam/constrained/AugmentedLagrangianOptimizer.h>
#include <gtsam/slam/AntiFactor.h>
#include <iostream>

namespace gtsam {

/** A factor that adds a constant bias term to the original factor.
 * This factor is used in the augmented Lagrangian optimizer to create biased
 * cost functions.
 * Note that the noise model is stored twice (both in the original factor and
 * in the noise model of the substitute factor). The noise model in the
 * original factor will be ignored. */
class GTSAM_EXPORT BiasedFactor : public NoiseModelFactor {
 protected:
  typedef NoiseModelFactor Base;
  typedef BiasedFactor This;

  // The wrapped factor whose unwhitened error is shifted by bias_.
  Base::shared_ptr originalFactor_;
  // Constant bias added to the original factor's unwhitened error.
  Vector bias_;

 public:
  typedef std::shared_ptr<This> shared_ptr;

  /** Default constructor for I/O only */
  BiasedFactor() {}

  /** Destructor */
  ~BiasedFactor() override {}

  /**
   * Constructor
   * @param originalFactor original factor on X
   * @param bias the bias term (must match the error dimension of
   *        originalFactor)
   */
  BiasedFactor(const Base::shared_ptr& originalFactor, const Vector& bias)
      : Base(originalFactor->noiseModel(), originalFactor->keys()),
        originalFactor_(originalFactor),
        bias_(bias) {}

  /**
   * Error function *without* the NoiseModel: the original factor's
   * unwhitened error plus the constant bias.
   * If H is specified, the Jacobians of the original factor are returned
   * unchanged, since the constant bias has zero derivative.
   */
  Vector unwhitenedError(
      const Values& x,
      gtsam::OptionalMatrixVecType H = nullptr) const override {
    return originalFactor_->unwhitenedError(x, H) + bias_;
  }

  /** print */
  void print(const std::string& s, const KeyFormatter& keyFormatter =
                                       DefaultKeyFormatter) const override {
    std::cout << s << "BiasedFactor " << bias_.transpose()
              << " version of:" << std::endl;
    originalFactor_->print(s, keyFormatter);
  }

  /** Return a deep copy of this factor. */
  gtsam::NonlinearFactor::shared_ptr clone() const override {
    // make_shared is exception-safe and performs a single allocation;
    // shared_ptr<This> converts implicitly to shared_ptr<NonlinearFactor>.
    return std::make_shared<This>(*this);
  }

};  // \class BiasedFactor

/* Build the augmented Lagrangian merit function as a factor graph.
 * See the documentation in AugmentedLagrangian.h for the derivation. */
NonlinearFactorGraph AugmentedLagrangianFunction(
    const ConstrainedOptProblem& problem, const std::vector<Vector>& lambdaEq,
    const std::vector<double>& lambdaIneq, double muEq, double muIneq,
    InequalityPenaltyFunction::shared_ptr ineqConstraintPenaltyFunction,
    const double epsilon) {
  // Initialize by adding in cost factors.
  NonlinearFactorGraph graph = problem.costs();

  // Create factors corresponding to equality constraints. Each penalty factor
  // 0.5*muEq*||h(x)||^2 is biased by lambdaEq/muEq (scaled into sigma units)
  // so it represents 0.5*muEq*||h(x) - lambdaEq/muEq||^2.
  const NonlinearEqualityConstraints& eqConstraints = problem.eConstraints();
  for (size_t i = 0; i < eqConstraints.size(); i++) {
    const auto& constraint = eqConstraints.at(i);
    // lambdaEq[i] is a Vector, so the scaling by sigmas must be element-wise:
    // a plain operator* would be an (ill-sized) Eigen matrix product.
    Vector bias = (lambdaEq[i] / muEq).cwiseProduct(constraint->sigmas());
    auto penalty_l2 = constraint->penaltyFactor(muEq);
    graph.emplace_shared<BiasedFactor>(penalty_l2, bias);
  }

  // Create factors corresponding to penalty terms of inequality constraints,
  // i.e. 0.5 * muIneq * ||g(x)_-||^2 with the user-selected penalty function.
  const NonlinearInequalityConstraints& ineqConstraints =
      problem.iConstraints();
  graph.add(ineqConstraints.penaltyGraphCustom(ineqConstraintPenaltyFunction,
                                               muIneq));

  // Create factors corresponding to Lagrange multiplier terms of i-constraints:
  // 0.5*epsilon*||g(x) - lambdaIneq/epsilon||^2 minus (via AntiFactor) the
  // auxiliary term 0.5*epsilon*||g(x)||^2, leaving -lambdaIneq*g(x) + const.
  for (size_t i = 0; i < ineqConstraints.size(); i++) {
    const auto& constraint = ineqConstraints.at(i);
    // lambdaIneq[i] is a scalar, so plain multiplication with sigmas is fine.
    Vector bias = lambdaIneq[i] / epsilon * constraint->sigmas();
    auto penalty_l2 = constraint->penaltyFactorEquality(epsilon);
    graph.emplace_shared<BiasedFactor>(penalty_l2, bias);
    graph.emplace_shared<AntiFactor>(penalty_l2);
  }

  return graph;
}

} // namespace gtsam
62 changes: 62 additions & 0 deletions gtsam/constrained/AugmentedLagrangian.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file AugmentedLagrangian.h
* @brief Augmented Lagrangian function implemented as a nonlinear factor
* graph.
* @author Yetong Zhang
* @date Aug 3, 2024
*/

#pragma once

#include <gtsam/constrained/ConstrainedOptProblem.h>

namespace gtsam {

/**
 * Augmented Lagrangian merit function for equality constraints h(x) = 0 and
 * inequality constraints g(x) <= 0:
 * m(x) = 0.5 * ||f(x)||^2 - lambdaEq * h(x) + 0.5 * muEq * ||h(x)||^2
 *        - lambdaIneq * g(x) + 0.5 * muIneq * ||g(x)_-||^2
 * To express it in nonlinear least-squares form, we add and subtract an
 * auxiliary term 0.5 * epsilon * ||g(x)||^2:
 * m(x)
 * = m(x) + 0.5 * epsilon * ||g(x)||^2 - 0.5 * epsilon * ||g(x)||^2
 * = 0.5 * ||f(x)||^2
 *   + (0.5 * muEq * ||h(x)||^2 - lambdaEq * h(x))
 *   + (0.5 * epsilon * ||g(x)||^2 - lambdaIneq * g(x))
 *   + 0.5 * muIneq * ||g(x)_-||^2
 *   - 0.5 * epsilon * ||g(x)||^2
 * and then complete the squares:
 * = 0.5 * ||f(x)||^2
 *   + 0.5 * muEq * ||h(x)- lambdaEq/muEq||^2
 *   + 0.5 * epsilon * ||g(x)-lambdaIneq/epsilon||^2
 *   + 0.5 * muIneq * ||g(x)_-||^2
 *   - 0.5 * epsilon * ||g(x)||^2
 *   - c
 * where
 *   c = ||lambdaEq||^2 / (2 * muEq) + ||lambdaIneq||^2 / (2 * epsilon)
 * is a constant (x-independent) term,
 * and epsilon can be any positive scalar value.
 *
 * Notice: the sole purpose of epsilon is to incorporate (-lambdaIneq * g(x))
 * in nonlinear least-squares form. To do so, we manually create an additional
 * term (0.5 * epsilon * ||g(x)||^2), which is added and then subtracted in
 * the merit function. The term (-lambdaIneq * g(x)) and (0.5 * epsilon *
 * ||g(x)||^2) can be combined as a least-square term, and the subtraction of
 * (0.5 * epsilon * ||g(x)||^2) is performed with an anti-factor that is part
 * of the returned graph.
 *
 * @param problem the constrained optimization problem (costs, h(x), g(x))
 * @param lambdaEq Lagrange multiplier vectors, one per equality constraint
 * @param lambdaIneq Lagrange multiplier scalars, one per inequality constraint
 * @param muEq penalty parameter for equality constraints
 * @param muIneq penalty parameter for inequality constraints
 * @param ineqConstraintPenaltyFunction penalty function used for ||g(x)_-||^2
 * @param epsilon auxiliary positive scalar (see derivation above)
 * @return factor graph whose total error represents m(x) + c; the auxiliary
 *         0.5 * epsilon * ||g(x)||^2 term is cancelled inside the graph by an
 *         AntiFactor. (NOTE(review): an earlier comment stated
 *         m(x) + 0.5 * epsilon * ||g(x)||^2 + c, which matches the graph only
 *         without the anti-factors — confirm intended reading.)
 */
GTSAM_EXPORT NonlinearFactorGraph AugmentedLagrangianFunction(
    const ConstrainedOptProblem& problem, const std::vector<Vector>& lambdaEq,
    const std::vector<double>& lambdaIneq, double muEq, double muIneq,
    InequalityPenaltyFunction::shared_ptr ineqConstraintPenaltyFunction,
    const double epsilon = 1.0);
} // namespace gtsam
98 changes: 4 additions & 94 deletions gtsam/constrained/AugmentedLagrangianOptimizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,77 +16,15 @@
* @date Aug 3, 2024
*/

#include <gtsam/constrained/AugmentedLagrangian.h>
#include <gtsam/constrained/AugmentedLagrangianOptimizer.h>
#include <gtsam/slam/AntiFactor.h>

#include <iomanip>

using std::setw, std::cout, std::endl, std::setprecision;

namespace gtsam {

/** A factor that adds a constant bias term to the original factor.
* This factor is used in augmented Lagrangian optimizer to create biased cost
* functions.
* Note that the noise model is stored twice (both in original factor and the
* noise model of substitute factor. The noise model in the original factor will
* be ignored. */
class GTSAM_EXPORT BiasedFactor : public NoiseModelFactor {
protected:
typedef NoiseModelFactor Base;
typedef BiasedFactor This;

// original factor
Base::shared_ptr originalFactor_;
Vector bias_;

public:
typedef std::shared_ptr<This> shared_ptr;

/** Default constructor for I/O only */
BiasedFactor() {}

/** Destructor */
~BiasedFactor() override {}

/**
* Constructor
* @param originalFactor original factor on X
* @param bias the bias term
*/
BiasedFactor(const Base::shared_ptr& originalFactor, const Vector& bias)
: Base(originalFactor->noiseModel(), originalFactor->keys()),
originalFactor_(originalFactor),
bias_(bias) {}

/**
* Error function *without* the NoiseModel, \f$ z-h(x) \f$.
* Override this method to finish implementing an N-way factor.
* If the optional arguments is specified, it should compute
* both the function evaluation and its derivative(s) in H.
*/
virtual Vector unwhitenedError(
const Values& x,
gtsam::OptionalMatrixVecType H = nullptr) const override {
return originalFactor_->unwhitenedError(x, H) + bias_;
}

/** print */
void print(const std::string& s, const KeyFormatter& keyFormatter =
DefaultKeyFormatter) const override {
std::cout << s << "BiasedFactor " << bias_.transpose()
<< " version of:" << std::endl;
originalFactor_->print(s, keyFormatter);
}

/** Return a deep copy of this factor. */
gtsam::NonlinearFactor::shared_ptr clone() const override {
return std::static_pointer_cast<gtsam::NonlinearFactor>(
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
}

}; // \class BiasedFactor

/* ************************************************************************* */
void AugmentedLagrangianState::initializeLagrangeMultipliers(
const ConstrainedOptProblem& problem) {
Expand Down Expand Up @@ -151,37 +89,9 @@ Values AugmentedLagrangianOptimizer::optimize() const {
/* ************************************************************************* */
NonlinearFactorGraph AugmentedLagrangianOptimizer::augmentedLagrangianFunction(
const State& state, const double epsilon) const {
// Initialize by adding in cost factors.
NonlinearFactorGraph graph = problem_.costs();

// Create factors corresponding to equality constraints.
const NonlinearEqualityConstraints& eqConstraints = problem_.eConstraints();
const double& muEq = state.muEq;
for (size_t i = 0; i < eqConstraints.size(); i++) {
const auto& constraint = eqConstraints.at(i);
Vector bias = state.lambdaEq[i] / muEq;
bias = bias.cwiseProduct(constraint->sigmas());
auto penalty_l2 = constraint->penaltyFactor(muEq);
graph.emplace_shared<BiasedFactor>(penalty_l2, bias);
}

// Create factors corresponding to penalty terms of inequality constraints.
const NonlinearInequalityConstraints& ineqConstraints =
problem_.iConstraints();
const double& muIneq = state.muIneq;
graph.add(ineqConstraints.penaltyGraphCustom(
p_->ineqConstraintPenaltyFunction, muIneq));

// Create factors corresponding to Lagrange multiplier terms of i-constraints.
for (size_t i = 0; i < ineqConstraints.size(); i++) {
const auto& constraint = ineqConstraints.at(i);
Vector bias = state.lambdaIneq[i] / epsilon * constraint->sigmas();
auto penalty_l2 = constraint->penaltyFactorEquality(epsilon);
graph.emplace_shared<BiasedFactor>(penalty_l2, bias);
graph.emplace_shared<AntiFactor>(penalty_l2);
}

return graph;
return AugmentedLagrangianFunction(
problem_, state.lambdaEq, state.lambdaIneq, state.muEq, state.muIneq,
p_->ineqConstraintPenaltyFunction, epsilon);
}

/* ************************************************************************* */
Expand Down
2 changes: 1 addition & 1 deletion gtsam/constrained/AugmentedLagrangianOptimizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class GTSAM_EXPORT AugmentedLagrangianState : public PenaltyOptimizerState {
class GTSAM_EXPORT AugmentedLagrangianOptimizer : public ConstrainedOptimizer {
public:
using Base = ConstrainedOptimizer;
using This = PenaltyOptimizer;
using This = AugmentedLagrangianOptimizer;
using shared_ptr = std::shared_ptr<This>;

using Params = AugmentedLagrangianParams;
Expand Down
Loading
Loading