From f426d2ea54d4d5aeb391e722d71672d00c6343f0 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 19 Oct 2025 13:08:44 -0400 Subject: [PATCH 01/17] Copy IMPL from risam-v2 --- gtsam/sam/RISAM.cpp | 311 +++++++++++++++++++++++++++++ gtsam/sam/RISAM.h | 249 +++++++++++++++++++++++ gtsam/sam/RISAMGraduatedFactor.cpp | 35 ++++ gtsam/sam/RISAMGraduatedFactor.h | 167 ++++++++++++++++ gtsam/sam/RISAMGraduatedKernel.cpp | 182 +++++++++++++++++ gtsam/sam/RISAMGraduatedKernel.h | 197 ++++++++++++++++++ 6 files changed, 1141 insertions(+) create mode 100644 gtsam/sam/RISAM.cpp create mode 100644 gtsam/sam/RISAM.h create mode 100644 gtsam/sam/RISAMGraduatedFactor.cpp create mode 100644 gtsam/sam/RISAMGraduatedFactor.h create mode 100644 gtsam/sam/RISAMGraduatedKernel.cpp create mode 100644 gtsam/sam/RISAMGraduatedKernel.h diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp new file mode 100644 index 0000000000..4367d434b8 --- /dev/null +++ b/gtsam/sam/RISAM.cpp @@ -0,0 +1,311 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file RISAM.cpp + * @brief Implementation of the RISAM algorithm. 
+ * @author Dan McGann + * @date October 2025 + */ + + +#include + +#include + +#include "risam/GraduatedFactor.h" + +namespace risam { + +/** + * #### ## ## ######## ######## ######## ######## ### ###### ######## + * ## ### ## ## ## ## ## ## ## ## ## ## ## + * ## #### ## ## ## ## ## ## ## ## ## ## + * ## ## ## ## ## ###### ######## ###### ## ## ## ###### + * ## ## #### ## ## ## ## ## ######### ## ## + * ## ## ### ## ## ## ## ## ## ## ## ## ## + * #### ## ## ## ######## ## ## ## ## ## ###### ######## + */ + +/*********************************************************************************************************************/ +RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::FactorIndices& remove_factor_indices, + const boost::optional>& constrained_keys, + const boost::optional>& no_relin_keys, + const boost::optional>& extra_reelim_keys, + bool force_relinearize) { + gtsam::ISAM2UpdateParams update_params; + update_params.removeFactorIndices = remove_factor_indices; + update_params.constrainedKeys = constrained_keys; + update_params.noRelinKeys = no_relin_keys; + update_params.extraReelimKeys = extra_reelim_keys; + update_params.force_relinearize = force_relinearize; + return update(new_factors, new_theta, extra_gnc_involved_keys, update_params); +} + +/*********************************************************************************************************************/ +RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::ISAM2UpdateParams& update_params) { + // Determine if the update includes any graduated factors (i.e. 
potential outliers) + bool update_includes_potential_outliers = false; + for (auto& factor : new_factors) { + auto grad_factor = boost::dynamic_pointer_cast(factor); + if (grad_factor) update_includes_potential_outliers = true; + } + + // Update housekeeping for any mu_inits + updateHouseKeeping(new_factors, update_params); + + // Run the update: robust if we have potential outliers (or if requested) otherwise standard iSAM2 + UpdateResult result; + if (extra_gnc_involved_keys || update_includes_potential_outliers) { + result = updateRobust(new_factors, new_theta, extra_gnc_involved_keys, update_params); + } else { + result.isam2_result = solver_->update(new_factors, new_theta, update_params); + solver_->runBackSubstitution(); + } + + // If we have converged update mu_inits_based on status + if (params_.increment_outlier_mu) incrementMuInits(); + + return result; +} + +/*********************************************************************************************************************/ +RISAM::UpdateResult RISAM::updateRobust(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::ISAM2UpdateParams& update_params) { + // Setup the result structure + UpdateResult update_result; + + // Create params for the initial update, note these may be modified by convexifyInvolvedFactors + gtsam::ISAM2UpdateParams initial_update_params = update_params; + + // Convexify involved factors + std::set convex_factors = + convexifyInvolvedFactors(new_factors, new_theta, extra_gnc_involved_keys, initial_update_params, update_result); + /// Setup a container to count the number of times mu is updated for each convex factor + std::map mu_update_count; + for (auto& fidx : convex_factors) mu_update_count[fidx] = 0; + + // Run the initial update to add new factors, after this the iSAM2 factors/var_index will match the risam copies + update_result.isam2_result = solver_->update(new_factors, new_theta, 
initial_update_params); + solver_->runBackSubstitution(); + + // Run GNC iterations until all convex factors have converged w.r.t. mu_ + gtsam::FactorIndices remaining_convex_factors(convex_factors.begin(), convex_factors.end()); + gtsam::ISAM2UpdateParams update_params_internal; // Used to force updates convex factors + ensure better ordering + while (remaining_convex_factors.size() > 0) { + // Get the current solution + gtsam::Values current_est = solver_->calculateEstimate(); + // Update mu for all convex factors and get the convex keys + gtsam::FastList convexKeys; + for (gtsam::FactorIndex fidx : remaining_convex_factors) { + auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); + *(mu_[fidx]) = + grad_factor->kernel()->updateMu(*(mu_[fidx]), grad_factor->residual(current_est), mu_update_count[fidx]); + convexKeys.insert(convexKeys.end(), factors_.at(fidx)->begin(), factors_.at(fidx)->end()); + mu_update_count[fidx]++; + } + + // Run the Update, re-eliminating the subproblem defined at this time-step + update_params_internal.extraReelimKeys = convexKeys; // Force update to convex factors + update_params_internal.constrainedKeys = + gtsam::FastMap(); // Prevent iSAM2 from ordering convex factors first + solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), update_params_internal); + solver_->runBackSubstitution(); + + // Update set of convex Factors + gtsam::FactorIndices new_remaining_convex_factors; + for (gtsam::FactorIndex fidx : remaining_convex_factors) { + auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); + if (!grad_factor->kernel()->isMuConverged(*(mu_[fidx]))) new_remaining_convex_factors.push_back(fidx); + } + remaining_convex_factors = new_remaining_convex_factors; + } + + for (size_t i = 0; i < params_.number_extra_iters; i++) { + // Orig RISAM preformed 1 extra iteration + solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), gtsam::ISAM2UpdateParams()); + solver_->runBackSubstitution(); + } + + 
return update_result; +} + +/*********************************************************************************************************************/ +gtsam::Values RISAM::calculateEstimate() { return solver_->calculateEstimate(); } + +/*********************************************************************************************************************/ +void RISAM::forceSetLinearization(const gtsam::Key& key, const gtsam::Value& value) { + solver_->forceSetLinearization(key, value); +} + +/*********************************************************************************************************************/ +std::set RISAM::getOutliers(double chi2_outlier_thresh) { + std::set outlier_factors; + gtsam::Values current_est = solver_->calculateEstimate(); + + for (size_t i = 0; i < factors_.size(); i++) { + gtsam::NonlinearFactor::shared_ptr nlf_ptr = factors_.at(i); + GraduatedFactor::shared_ptr grad_ptr = boost::dynamic_pointer_cast(nlf_ptr); + if (grad_ptr) { + auto dist = boost::math::chi_squared_distribution(nlf_ptr->dim()); + const double thresh = boost::math::quantile(dist, chi2_outlier_thresh); + const double residual = grad_ptr->residual(current_est); + if (residual > thresh) outlier_factors.insert(i); + } + } + return outlier_factors; +} + +/** + * ## ## ######## ## ######## ######## ######## ###### + * ## ## ## ## ## ## ## ## ## ## ## + * ## ## ## ## ## ## ## ## ## ## + * ######### ###### ## ######## ###### ######## ###### + * ## ## ## ## ## ## ## ## ## + * ## ## ## ## ## ## ## ## ## ## + * ## ## ######## ######## ## ######## ## ## ###### + */ + +/*********************************************************************************************************************/ +void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::ISAM2UpdateParams& update_params) { + // To match the behavior of iSAM2 and ensure matching indices we first add info for new factors then remove info + + // Add the factors and get their new indices + 
gtsam::FactorIndices new_factor_indices = + factors_.add_factors(new_factors, params_.isam2_params.findUnusedFactorSlots); + // Update the variable index for the new factors + variable_index_.augment(new_factors, new_factor_indices); + // Add a mu and mu_init entry for each factor + augmentMu(new_factors, new_factor_indices); + + // Once we have added new factors we can remove any factors and corresponding info + gtsam::NonlinearFactorGraph removed_factors; + removed_factors.reserve(update_params.removeFactorIndices.size()); + for (const auto fidx : update_params.removeFactorIndices) { + removed_factors.push_back(factors_.at(fidx)); + factors_.remove(fidx); + mu_.at(fidx).reset(); + mu_inits_.at(fidx).reset(); + } + variable_index_.remove(update_params.removeFactorIndices.begin(), update_params.removeFactorIndices.end(), + removed_factors); +} + +/*********************************************************************************************************************/ +void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::FactorIndices& new_factor_indices) { + for (size_t i = 0; i < new_factors.nrFactors(); i++) { + gtsam::FactorIndex fidx = new_factor_indices[i]; + auto grad_factor = boost::dynamic_pointer_cast(new_factors.at(i)); + + // Get the pointers to mu and mu_init for this factor + std::shared_ptr mu, mu_init; + if (grad_factor) { + mu = grad_factor->mu_; + mu_init = std::make_shared(grad_factor->kernel()->muInit()); + } else { + mu = std::make_shared(0); + mu_init = std::make_shared(0); + } + + // Extend or replace mu and mu_inits + if (fidx >= mu_.size()) { + mu_.push_back(mu); + mu_inits_.push_back(mu_init); + } else { + mu_[fidx] = mu; + mu_inits_[fidx] = mu_init; + } + } +} + +/*********************************************************************************************************************/ +void RISAM::incrementMuInits() { + gtsam::VectorValues delta = solver_->getDelta(); + if (delta.norm() / delta.size() < 
params_.outlier_mu_avg_var_convergence_thresh) { + gtsam::Values theta = solver_->calculateEstimate(); + for (auto fidx : factors_to_check_status_) { + auto grad_factor = boost::dynamic_pointer_cast(factors_[fidx]); + auto mahdist = grad_factor->residual(theta); + auto dist = boost::math::chi_squared_distribution(factors_[fidx]->dim()); + if (mahdist > boost::math::quantile(dist, params_.outlier_mu_chisq_upper_bound)) { + *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInit(*(mu_inits_[fidx])); + } else if (mahdist < boost::math::quantile(dist, params_.outlier_mu_chisq_lower_bound)) { + *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); + } + } + factors_to_check_status_.clear(); + } +} + +/*********************************************************************************************************************/ +std::set RISAM::convexifyInvolvedFactors( + const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, gtsam::ISAM2UpdateParams& update_params, + UpdateResult& update_result) { + // Gather all involved keys - Involved keys are directly induced by the new factors + gtsam::KeySet new_factor_keys = new_factors.keys(); + gtsam::KeySet involved_keys = solver_->traverseTop(new_factors.keyVector()); + involved_keys.insert(new_factor_keys.begin(), new_factor_keys.end()); + + // Add to the gathered involved keys, any keys specified by the user + if (extra_gnc_involved_keys) { + involved_keys.insert(extra_gnc_involved_keys->begin(), extra_gnc_involved_keys->end()); + // For any user specified involved keys also add them to the extra-reelim + if (!update_params.extraReelimKeys) { + update_params.extraReelimKeys = gtsam::FastList(); // create if it does not exist + } + update_params.extraReelimKeys->insert(update_params.extraReelimKeys->end(), extra_gnc_involved_keys->begin(), + extra_gnc_involved_keys->end()); + } + + // Gather all affected keys - Super set of 
involved keys including keys modified by user params + auto [affected_keys, is_batch_update] = solver_->preComputeUpdateInfo(new_factors, new_theta, update_params); + + // Convexify update-involved factors + std::set convex_factors; + for (gtsam::Key affected_key : affected_keys) { + for (gtsam::FactorIndex fidx : variable_index_[affected_key]) { + auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); + if (grad_factor) { + bool inside = true; // indicates that all variables touched by this factor are in the affected set + bool update_involved = false; // indicates a factor touches at least one variable in the involved set + for (gtsam::Key factor_key : factors_.at(fidx)->keys()) { + inside = inside && affected_keys.count(factor_key); + update_involved = update_involved || involved_keys.count(factor_key); + } + + // If the factor is update involved and marked as inside, we need to convexify + // Note: everything is inside on a batch update + if ((inside || is_batch_update) && update_involved) { + convex_factors.insert(fidx); + factors_to_check_status_.insert(fidx); + *(mu_[fidx]) = *(mu_inits_[fidx]); + } + } + } + } + + // Fill out the Update Result + update_result.involved_variables = involved_keys; + update_result.convexified_factors = convex_factors; + update_result.affected_variables = affected_keys; + + // Return + return convex_factors; +} + +} // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h new file mode 100644 index 0000000000..8823405080 --- /dev/null +++ b/gtsam/sam/RISAM.h @@ -0,0 +1,249 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file RISAM.h + * @brief Robust Incremental Smoothing and Mapping (riSAM) + * @date October 2025 + * @author Dan McGann + */ +#pragma once +#include + +#include "risam/DoglegLineSearch.h" +#include "risam/ExtendedISAM2.h" + +namespace gtsam { + +class RISAM { + /** TYPES **/ + public: + /** @brief Struct Containing all configuration parameters for riSAM + * See below for details on each parameters + */ + struct Parameters { + /// @brief Explicit constructor to use default values + explicit Parameters() = default; + /// @brief The parameters for the encapsulated iSAM2 algorithm @Note some + /// are overriden in RISAM::RISAM() + ISAM2Params isam2_params; + /// @brief Optional params for custom incremental trust region methods + std::optional + optimization_params = DoglegLineSearch::Parameters(); + + /// @brief Flag to increment mu_init when we converge in values for outlier + /// factors + // See RISAM::IncrementMuInit for details + bool increment_outlier_mu{true}; + /// @brief Increment mu_init if chi^2 > upper bound + double outlier_mu_chisq_upper_bound{0.95}; + /// @brief Decrement mu_init if chi^2 < lower bound + double outlier_mu_chisq_lower_bound{0.25}; + /// @brief Average variable delta.norm() threshold used to identify variable + /// convergence + double outlier_mu_avg_var_convergence_thresh{0.01}; + /// @brief The number of extra iterations to perform after mu convergence + size_t number_extra_iters{1}; + }; + + /** @brief Struct containing information about the riSAM update + * See below for details about the information included + */ + struct UpdateResult { + /// @brief The iSAM2 result from the first internal update [Note riSAM may + /// run multiple internal updates] + ISAM2Result isam2_result; + + /// @brief The set variables directly involved in the update + std::set 
involved_variables; + /// @brief The set of variables affected by the update + std::set affected_variables; + /// @brief The set of factors convexified in this update + std::set convexified_factors; + }; + + /** FIELDS **/ + protected: + /// @brief Configuration parameters for the riSAM algorithm + Parameters params_; + /// @brief The encapsulated iSAM2 algorithm. We use ExtendedISAM2 to get some + /// additional functionality see ExtendedISAM2 for details + std::unique_ptr solver_; + /// @brief The current control parameter values for all factors. If factor i + /// is a GraduatedFactor mu_[i] is a reference to that factors internal state. + /// If factor i is not Graduated mu_[i] is a pointer to a value of Zero + FastVector> mu_; + /// @brief The current initial control parameter values for all factors. If + /// factor i is not Graduated mu_[i] is a pointer to a value of Zero + FastVector> mu_inits_; + /// @brief The set of GraduatedFactors that have been convexified since the + /// last mu_init_ increment These are the factors that we can increment mu's + /// for the next time variables converge. + FastSet factors_to_check_status_; + + /** + * RISAM maintains its own variable index and factors. These match exactly + * that of the underlying iSAM2 solver, but lead it since we update RISAM + * housekeeping before updating the underlying solver. + */ + /// @brief VariableIndex for the underlying system matches that of solver_ + /// after every update + VariableIndex variable_index_; + /// @brief The Factors for the underlying system matches that of solver_ after + /// every update + NonlinearFactorGraph factors_; + + /** INTERFACE **/ + public: + /** @brief Constructs an instance of the riSAM algorithm with provided + * configuration. Note this constructor will override some values in + * params.isam2_params. 
+ * @param params: The configuration parameters for this instance of riSAM + * + */ + RISAM(const Parameters& params) : params_(params) { + // Override user preferences for iSAM2 params + params_.isam2_params.relinearizeSkip = + 1; // We must have relinearization skip as 1 + params_.isam2_params.cacheLinearizedFactors = + false; // We must not use cached factors + /** Param override explanation + * To ensure that we update the current estimated theta_ after each + * intermediate GNC iteration we must use relinearizeSkip = 1 so that we + * allow relinearization at any step it is needed. + * + * For each intermediate GNC iteration we update mu_ values for convex + * GraduatedFactors to ensure that GraduatedFactors are linearized according + * to these mu_ values we must turn off caching that could result in old mu_ + * being used. + */ + + // Construct the encapsulated solver using the modified parameters. + solver_ = std::make_unique(params_.isam2_params, + params_.optimization_params); + } + + /// @brief Update Interface. See ISAM2 docs for details as parameters match + /// (almost) exactly + /// @param extra_gnc_involved_keys - overrides internal RISAM logic and + /// performs a robust update. All extra involved keys will be treated as being + /// apart of the current update wrt factor convexification + UpdateResult update( + const NonlinearFactorGraph& new_factors = + NonlinearFactorGraph(), + const Values& new_theta = Values(), + const boost::optional> extra_gnc_involved_keys = + boost::none, + const FactorIndices& remove_factor_indices = + FactorIndices(), + const boost::optional>& constrained_keys = + boost::none, + const boost::optional>& no_relin_keys = + boost::none, + const boost::optional>& extra_reelim_keys = + boost::none, + bool force_relinearize = false); + + /// @brief Update Interface. 
See ISAM2 docs for details as parameters match + /// (almost) exactly + /// @param force_gnc_solve - overrides internal RISAM logic and performs a + /// robust update + UpdateResult update( + const NonlinearFactorGraph& new_factors, + const Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const ISAM2UpdateParams& update_params); + + /// @brief Returns the current estimate from the solver + Values calculateEstimate(); + + /// @brief Returns the underlying factors of the system + NonlinearFactorGraph getFactorsUnsafe() { return factors_; } + + /** @brief Permits manual modification of the solver's linearization point. + * This function can be used for forcefully set the linearization point used + * by the solver to a specific value. The variable that is modified should be + * marked for re-elimination during the next update to see effects. WARN: This + * function can have strange effects and should only be used by developers who + * know what they are doing. + * @param key: The variable for which to modify the linearization point + * @param value: The new linearization point + */ + void forceSetLinearization(const Key& key, const Value& value); + + /** @brief Returns the set of measurements (identified by their factor index) + * that have been deems outliers. riSAM defines a measurement as an outlier if + * its current residual is greater than the chi2_threshold provided + * @param chi2_outlier_thresh - The chi2 threshold used to define outliers + * WARN: Potentially slow since we iterate over all factors + * @returns The set of factors that are considered outliers by the RISAM + * algorithm. + */ + std::set getOutliers(double chi2_outlier_thresh); + + /** HELPERS **/ + protected: + /** @brief Preforms a robust update to the system. + * A robust update is required any time new_factors contains GraduatedFactors + * (i.e. 
there are potentially new inlier/outlier measurements) + * @see ISAM2::update for details on params + */ + UpdateResult updateRobust( + const NonlinearFactorGraph& new_factors, + const Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const ISAM2UpdateParams& update_params); + + /** @brief Update housekeeping information for riSAM this involved: + * 1. Determining the indicies that each factor in new_factors will be + * inserted into + * 2. Update the riSAM fields: factors_ and variable_index_ (they will be + * ahead of those in solver_ until solver.update() is called) + * 3. Update mu_ and mu_init_ for the new factors + * @param new_factors: The new factors for the current update + * @param update_params: The update parameters for the current update + */ + void updateHouseKeeping(const NonlinearFactorGraph& new_factors, + const ISAM2UpdateParams& update_params); + + /** @brief Extend or update mu and mu_init for new factors + * @param new_factors: The new factors for which to extend the containers + * @param new_factor_indicies: the indicies assigned to each new_factor + */ + void augmentMu(const NonlinearFactorGraph& new_factors, + const FactorIndices& new_factor_indices); + + /** @brief Increments mu_inits_ for any factor recently convexified. + * Used to mitigate the long-term affect of measurements that we are confident + * are outliers + */ + void incrementMuInits(); + + /** @brief Convexifies factors involved in the update + * Finds all factors inside the total affected set, and involved in the update + * defined by new factors. 
Marks those factors as convex and resets mu_ to the + * factors current mu_init_ See RISAM::update for first three parameter + * details + * @param update_result: structure containing information about this update, + * modified by this function to fill in info about involved and affected + * variables as well as convexified factors + * @returns The indicies of all factors convexified in the update defined by + * the given parameters. + */ + std::set convexifyInvolvedFactors( + const NonlinearFactorGraph& new_factors, + const Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + ISAM2UpdateParams& internal_update_params, + UpdateResult& update_result); +}; + +} // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.cpp b/gtsam/sam/RISAMGraduatedFactor.cpp new file mode 100644 index 0000000000..e31d83098b --- /dev/null +++ b/gtsam/sam/RISAMGraduatedFactor.cpp @@ -0,0 +1,35 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + + +#include "risam/GraduatedFactor.h" + +namespace risam { +/* ************************************************************************* */ +GraduatedFactor::GraduatedFactor(GraduatedKernel::shared_ptr kernel) : kernel_(kernel) { + mu_ = std::make_shared(kernel_->muInit()); +} + +/* ************************************************************************* */ +GraduatedFactor::GraduatedFactor(const GraduatedFactor& other) : kernel_(other.kernel_) { + mu_ = std::make_shared(*(other.mu_)); +} + +/* ************************************************************************* */ +const GraduatedKernel::shared_ptr GraduatedFactor::kernel() const { return kernel_; } + +/* ************************************************************************* */ +void GraduatedFactor::updateKernel(const GraduatedKernel::shared_ptr& new_kernel) { + kernel_ = new_kernel; + *mu_ = new_kernel->muInit(); +} + +} // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h new file mode 100644 index 0000000000..76212b819f --- /dev/null +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -0,0 +1,167 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + + +/** @brief Factor that implements a graduated robust cost function. + * A Graduated factor overrides a portion of the NoiseModelFactor interface to implement a graduated robust cost + * function for the factor. 
+ * + * @author Dan McGann + * @date Mar 2022 + */ +#pragma once +#include + +#include "risam/GraduatedKernel.h" +#include "risam/RISAM.h" + +namespace risam { +class GraduatedFactor { + /** TYPES **/ + public: + typedef boost::shared_ptr shared_ptr; + + /** FIELDS **/ + protected: + /// @brief The Graduated Robust Kernel for this factor + GraduatedKernel::shared_ptr kernel_; + /// @brief The unique mu control parameter for this factor + std::shared_ptr mu_; + + /// befriend RISAM to give access to these protected values + friend class RISAM; + + /**INTERFACE**/ + public: + /** @brief Constructor + * @param kernel: The graduated kernel to apply to this factor + * @param args: The arguments required to construct the FACTOR_TYPE + */ + GraduatedFactor(GraduatedKernel::shared_ptr kernel); + + /// @brief Copy constructor + GraduatedFactor(const GraduatedFactor& other); + + /** @brief Linearize this factor at a specific point, using the specified convexification parameter mu + * @param current_estimate: the variable estimate at which to linearize the Factor + * @param mu: the current value of the convexification parameter for this factor + */ + virtual gtsam::GaussianFactor::shared_ptr linearizeRobust(const gtsam::Values& current_estimate) const = 0; + + /// @brief returns the residual of the factor + virtual double residual(const gtsam::Values& current_estimate) const = 0; + + /// @brief returns \rho(r) of the factor + virtual double robustResidual(const gtsam::Values& current_estimate) const = 0; + + /// @brief Returns the value of \mu_{init} for this graduated kernel + const GraduatedKernel::shared_ptr kernel() const; + + /// @brief Updates the Kernel of this Graduated Factor + /// WARN: Resets mu_ to the mu_init of the kernel + void updateKernel(const GraduatedKernel::shared_ptr& new_kernel); + + /// @brief Copies this factor as an instance of its base type without the graduated kernel + virtual gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const = 0; +}; + 
+template +class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { + static_assert(std::is_base_of::value, + "GraduatedFactor Must be instantiated with a Factor Derived from gtsam::NonlinearFactor."); + + /** TYPES **/ + public: + typedef boost::shared_ptr> shared_ptr; + + /**INTERFACE**/ + public: + /** @brief Constructor + * @param args: The arguments required to construct the FACTOR_TYPE + */ + template + GenericGraduatedFactor(GraduatedKernel::shared_ptr kernel, Args&&... args) + : FACTOR_TYPE(std::forward(args)...), GraduatedFactor(kernel) {} + + /// @brief Copy constructor + GenericGraduatedFactor(const GenericGraduatedFactor& other) + : FACTOR_TYPE(other), GraduatedFactor(other.kernel_) {} + + /// @brief makes a deep copy + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new GenericGraduatedFactor(*this))); + } + + gtsam::GaussianFactor::shared_ptr linearize(const gtsam::Values& current_estimate) const override { + return linearizeRobust(current_estimate); + } + + /// @brief For graduated factors return 0.5 \rho(r)^2 + double error(const gtsam::Values& values) const override { return 0.5 * std::pow(robustResidual(values), 2); } + + /** GRADUATED INTERFACE **/ + gtsam::GaussianFactor::shared_ptr linearizeRobust(const gtsam::Values& current_estimate) const override { + double r = residual(current_estimate); + + gtsam::Matrix A; + gtsam::Vector b; + auto whitened_linear_system = FACTOR_TYPE::linearize(current_estimate); + std::tie(A, b) = whitened_linear_system->jacobian(); + size_t output_dim = b.size(); + + // Extract the non-dense linear system + auto keys = whitened_linear_system->keys(); + std::vector Ablocks; + size_t idx_start = 0; + for (const auto& key : keys) { + size_t d = current_estimate.at(key).dim(); + gtsam::Matrix vblock = gtsam::Matrix::Zero(output_dim, d); + Ablocks.push_back(A.block(0, idx_start, output_dim, d)); + idx_start += d; + } + 
+ // Weight the Linearized Blocks + kernel_->weightSystem(Ablocks, b, r, *mu_); + + // Construct a jacobian factor from the weighted system + gtsam::FastMap Ablock_map; + for (size_t i = 0; i < Ablocks.size(); i++) { + Ablock_map[keys[i]] = Ablocks[i]; + } + return boost::make_shared(Ablock_map, b); + } + + /* ************************************************************************* */ + double residual(const gtsam::Values& current_estimate) const override { + return sqrt(2.0 * FACTOR_TYPE::error(current_estimate)); + } + + /* ************************************************************************* */ + double robustResidual(const gtsam::Values& current_estimate) const override { + return kernel_->error(residual(current_estimate), *mu_); + } + + /* ************************************************************************* */ + gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const override { return FACTOR_TYPE::clone(); } +}; + +/** HELPERS **/ +template +GenericGraduatedFactor make_graduated(Args&&... args) { + return GenericGraduatedFactor(std::forward(args)...); +} + +template +typename GenericGraduatedFactor::shared_ptr make_shared_graduated(Args&&... 
args) { + return boost::make_shared>(std::forward(args)...); +} +} // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp new file mode 100644 index 0000000000..a84ec00f1a --- /dev/null +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -0,0 +1,182 @@ +#include "risam/GraduatedKernel.h" + +#include + +#include "risam/Utilities.h" +namespace risam { + +/*********************************************************************************************************************/ +double SIGKernel::error(const double &residual, const double &mu) const { + double r2 = residual * residual; + double c2 = params_.shape_param * params_.shape_param; + return 0.5 * (c2 * r2) / (c2 + pow(r2, mu)); +} + +/*********************************************************************************************************************/ +double SIGKernel::weight(const double &residual, const double &mu) const { + double r2 = residual * residual; + double c2 = params_.shape_param * params_.shape_param; + double sqrt_denom = c2 + pow(r2, mu); + return (c2 * (c2 + pow(r2, mu) * (1 - mu))) / (sqrt_denom * sqrt_denom); +} + +/*********************************************************************************************************************/ +double SIGKernel::updateMu(const double &mu, const double &residual, const size_t &update_count) const { + if (params_.mu_update_strat == MuUpdateStrategy::MCGANN_2023) { + // Mu update empirically discovered and presented in the orig riSAM paper + return std::min(1.0, mu + (mu + 0.1) * 1.2); + } else if (params_.mu_update_strat == MuUpdateStrategy::SIMPLE_HUBER) { + // Simple mu update of [mu_init, 0.5, 1.0] where 0.5 results in approximately the huber kernel + return mu < 0.5 ? 
0.5 : 1.0; + } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_CODE_2023) { + // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for the current residual + if (!params_.kang_residual_threshold) { + throw std::runtime_error("mu_update_strat == KANG_CODE_2023 but no kang_residual_threshold provided in params"); + } + if (update_count == 0 && residual > params_.shape_param / std::sqrt(3) && + residual < (*params_.kang_residual_threshold)) { + double mu_inflection = bisectionMuSearch( + [residual, this](double new_mu) { return secondDerivative(residual, new_mu); }, 0.5, 1.0, 1e-9); + // If the inflection point is less than the current mu_init skip to mu_final + return mu_inflection <= mu ? 1.0 : mu_inflection; + } else { + // If update_count > 0 then we have already run the mu* update, so return 1.0 + // If r < c/sqrt(3) then the result will be > 1.0 so skip search and return 1.0 + // If r > kang_residual_threshold after the convex iter then this measurement is most def an outlier so go to 1.0 + // Note: This final condition is present in the code, but not in the paper by Kang et al. + return 1.0; + } + } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_PAPER_2023) { + // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for the current residual + if (update_count == 0 && residual > params_.shape_param / std::sqrt(3)) { + double mu_inflection = bisectionMuSearch( + [residual, this](double new_mu) { return secondDerivative(residual, new_mu); }, 0.5, 1.0, 1e-9); + // If the inflection point is less than the current mu_init skip to mu_final + return mu_inflection <= mu ? 
1.0 : mu_inflection; + } else { + // If update_count > 0 then we have already run the mu* update, so return 1.0 + // If r < c/sqrt(3) then the result will be > 1.0 so skip search and return 1.0 + return 1.0; + } + } else if (params_.mu_update_strat == MuUpdateStrategy::ONESTEP) { + // return mu_final after running with mu_init + return 1.0; + } else if (params_.mu_update_strat == MuUpdateStrategy::HALVES) { + // Arithmetic Series: mu[t+1] = mu[t] + (1/2) + return std::min(1.0, mu + 0.5); + } else if (params_.mu_update_strat == MuUpdateStrategy::THIRDS) { + // Arithmetic Series: mu[t+1] = mu[t] + (1/3) + return std::min(1.0, mu + (1.0 / 3.0)); + } else if (params_.mu_update_strat == MuUpdateStrategy::FOURTHS) { + // Arithmetic Series: mu[t+1] = mu[t] + (1/4) + return std::min(1.0, mu + 0.25); + } else if (params_.mu_update_strat == MuUpdateStrategy::ASYM_FOURTHS) { + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] + 0.25 + if (update_count == 0) return std::min(1.0, mu + 0.5); + return std::min(1.0, mu + 0.25); + } else if (params_.mu_update_strat == MuUpdateStrategy::NONCONVEX) { + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) else mu[t] + 0.2 + if (update_count == 0) return std::min(1.0, mu + 0.7); + return std::min(1.0, mu + 0.2); + } else if (params_.mu_update_strat == MuUpdateStrategy::STABLE) { + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + 0.4 if (t==1), else mu[t] + 0.05 + // My attempt to build the most stable method possible given lessons learned from multi-robot conditions + if (update_count == 0) return std::min(1.0, mu + 0.5); + if (update_count == 1) return std::min(1.0, mu + 0.4); + return std::min(1.0, mu + 0.05); + } else { + throw std::runtime_error("updateMu: requested MuUpdateStrategy is not implemented."); + } +} + +/*********************************************************************************************************************/ +double 
SIGKernel::incrementMuInit(const double &mu) const { + if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { + // Mu init update Empirically discovered and presented in the orig riSAM paper [Non-Symmetric] + return std::min(1.0, mu + (mu + 0.1) * 1.2); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { + // Simple update with 3 steps from init to final + return std::min(1.0, mu + (1.0 / 3.0)); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_4) { + // Simple update with 4 steps from init to final + return std::min(1.0, mu + 0.25); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_5) { + // Simple update with 5 steps from init to final + return std::min(1.0, mu + 0.2); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_10) { + // Simple update with 10 steps from init to final + return std::min(1.0, mu + 0.1); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_20) { + // Simple update with 20 steps from init to final + return std::min(1.0, mu + 0.05); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_50) { + // Simple update with 50 steps from init to final + return std::min(1.0, mu + 0.02); + } else { + throw std::runtime_error("incrementMuInit: requested MuInitIncrementStrategy is not implemented."); + } +} + +/*********************************************************************************************************************/ +double SIGKernel::incrementMuInitInv(const double &mu) const { + if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { + // Mu init update Empirically discovered and presented in the orig riSAM paper [Non-Symmetric] + return std::max(0.0, mu - 0.1); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { + // Simple Update with 3 steps from init to final + return std::max(0.0, mu - (1.0 / 3.0)); + } else if (params_.mu_init_inc_strat == 
MuInitIncrementStrategy::EQUAL_4) { + // Simple Update with 4 steps from init to final + return std::max(0.0, mu - (1.0 / 4.0)); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_5) { + // Simple update with 5 steps from init to final + return std::max(0.0, mu - 0.2); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_10) { + // Simple update with 10 steps from init to final + return std::max(0.0, mu - 0.1); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_20) { + // Simple update with 20 steps from init to final + return std::max(0.0, mu - 0.05); + } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_50) { + // Simple update with 50 steps from init to final + return std::max(0.0, mu - 0.02); + } else { + throw std::runtime_error("incrementMuInitInv: requested MuInitIncrementStrategy is not implemented."); + } +} + +/*********************************************************************************************************************/ +bool SIGKernel::isMuConverged(const double &mu) const { return mu >= convergence_thresh_; } + +/*********************************************************************************************************************/ +double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh) { + const double outlier_residual_thresh = + boost::math::quantile(boost::math::chi_squared_distribution(dof), chi2_outlier_thresh); + // Equation from taking derivative of SIGKernel setting equal to influence_thresh and solving for mu=1 + const double t1 = std::sqrt(2 * influence_thresh * std::pow(outlier_residual_thresh, 5)); + const double t2 = influence_thresh * std::pow(outlier_residual_thresh, 2); + const double t3 = influence_thresh - 2 * outlier_residual_thresh; + return std::sqrt(-((t1 + t2) / t3)); +} + +/*********************************************************************************************************************/ 
+double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { + return boost::math::quantile(boost::math::chi_squared_distribution(dof), chi2_threshold); +} + +/*********************************************************************************************************************/ +double SIGKernel::secondDerivative(const double &r, const double &mu) const { + // Setup helpful values + double c2 = params_.shape_param * params_.shape_param; + double c4 = c2 * c2; + double c6 = c4 * c2; + + // Calculate + double numerator = c6 - // Constant + (c4 * (mu + 2) * (2 * mu - 1) * std::pow(r, 2 * mu)) + // term 1 + (c2 * (mu - 1) * (2 * mu - 1) * std::pow(r, 4 * mu)); // term 2 + double denominator = std::pow(c2 + std::pow(r, 2 * mu), 3); + + return numerator / denominator; +} +} // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h new file mode 100644 index 0000000000..2c25a4c41b --- /dev/null +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -0,0 +1,197 @@ +/** @brief Interface for a graduated robust kernel. + * That is a robust kernel \rho(r_i, \mu) that varies from highly convex (quadratic) when \mu = \mu_{init} to non-convex + * (robust cost) as \mu trends towards \mu_{final} + * + * + * Citations: + * [2] - W. Kang et al. 
"Efficient Graduated Non-Convexity for Pose Graph Optimization", International Conference on + * Control, Automation and Systems (ICCAS), 2024 + * + * @author Dan McGann + * @date Mar 2022 + */ +#pragma once +#include +#include + +#include +#include + +namespace risam { +class GraduatedKernel { + /** TYPES **/ + public: + typedef boost::shared_ptr shared_ptr; + + /** FIELDS **/ + protected: + /// @brief The initial Value for mu + const double mu_init_; + /// @brief The threshold at which to consider mu to be converged + const double convergence_thresh_; + + /** INTERFACE **/ + public: + GraduatedKernel(double mu_init, double convergence_thresh) + : mu_init_(mu_init), convergence_thresh_(convergence_thresh) {} + + /** @brief Computes the graduated robust error of the function + * Error = \rho(r_i) + * + * @param residual: r_i the current (whitened) residual + * @param mu: \mu the current convexity parameter + */ + virtual double error(const double &residual, const double &mu) const = 0; + + /** @brief Computes the weight of the graduated robust cost function + * weight = w(r_i) + * + * @param residual: r_i the current (whitened) residual + * @param mu: \mu the current convexity parameter + */ + virtual double weight(const double &residual, const double &mu) const = 0; + + /** @brief Weights a linearized system + * Weight results in the solving of: w_i * r_i^2 = \sqrt(w_i) (A_i x + b_i) = \sqrt(w_i)* A_i x + \sqrt(w_i) * b_i + * + * @param A: Whitened linear system from a factor (\Sigma^{-0.5} H_i) + * @param b: Whitened linear system error from a factor (0.5 * \Sigma^(-0.5) ||h_i(x_i) - z_i||^2 = 0.5 * r_i^2) + */ + void weightSystem(std::vector &A, gtsam::Vector &b, const double &residual, const double &mu) const { + double sqrt_weight = sqrt(weight(residual, mu)); + for (gtsam::Matrix &Aj : A) { + Aj *= sqrt_weight; + } + b *= sqrt_weight; + } + + /// @brief Returns the value of \mu_{init} for this graduated kernel + double muInit() const { return mu_init_; } + + 
/// @brief Returns the next value of \mu for this graduated kernel + /// @param mu: The current value of mu + /// @param residual: The current residual of the factor + /// @param update_count: The number of mu updates during this graduated solve 0 = just made convex + virtual double updateMu(const double &mu, const double &residual, const size_t &update_count) const = 0; + + /// @brief Returns an incremented mu_init, used to decrease the influence of known outliers + virtual double incrementMuInit(const double &mu_init) const = 0; + + /// @brief Returns the previous value of mu_init for this graduated kernel + virtual double incrementMuInitInv(const double &mu) const = 0; + + /// @brief Returns true iff the value of \mu has converged to a non-convex state + virtual bool isMuConverged(const double &mu) const = 0; +}; + +/* ************************************************************************* */ +class SIGKernel : public GraduatedKernel { + /** My version of the Graduated version of the Geman-McClure kernel */ + + /** TYPES **/ + public: + /// @brief Shortcut for shared pointer + typedef boost::shared_ptr shared_ptr; + + /// @brief Strategies for updating Mu between GNC iterations of riSAM + enum class MuUpdateStrategy { + MCGANN_2023, // Geometric Series: mu[t+1] = mu[t] + (mu[t] + 0.1) * 1.2 + SIMPLE_HUBER, // Fixed Pattern [mu_init, 0.5, 1.0] + KANG_CODE_2023, // Sequence: [mu_init, mu*, 1.0] with early outlier rejection for r > kang_residual_threshold [2] + KANG_PAPER_2023, // Sequence: [mu_init, mu*, 1.0] (no early outlier rejection was presented in paper) [2] + ONESTEP, // Jump to mu=1.0 after initial convex step + HALVES, // Arithmetic Series: mu[t+1] = mu[t] + (1/2) + THIRDS, // Arithmetic Series: mu[t+1] = mu[t] + (1/3) + FOURTHS, // Arithmetic Series: mu[t+1] = mu[t] + (1/4) + ASYM_FOURTHS, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] + 0.25 + NONCONVEX, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) 
else mu[t] + 0.2 + STABLE // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + 0.4 if (t==1), else mu[t] + 0.05 + }; + /// @brief Strategies for update Mu_init after convergence + enum class MuInitIncrementStrategy { MCGANN_2023, EQUAL_3, EQUAL_4, EQUAL_5, EQUAL_10, EQUAL_20, EQUAL_50 }; + + struct Parameters { + /// @brief The shape parameter of the GM kernel. + double shape_param{1}; + /// @brief The update strategy to use for mu updates + MuUpdateStrategy mu_update_strat{MuUpdateStrategy::MCGANN_2023}; + /// @brief The update strategy to use for mu_init increments + MuInitIncrementStrategy mu_init_inc_strat{MuInitIncrementStrategy::MCGANN_2023}; + + /// @brief The residual threshold use to skip to mu=1.0 in the provided for [2] + /// Required only if mu_update_strat == KANG_CODE_2023 + std::optional kang_residual_threshold{std::nullopt}; + + /// @brief Default Constructor + explicit Parameters() = default; + /// @brief Parameterized Constructor + Parameters(double shape_param, MuUpdateStrategy mu_update_strat, MuInitIncrementStrategy mu_init_inc_strat, + std::optional kang_residual_threshold = std::nullopt) + : shape_param(shape_param), + mu_update_strat(mu_update_strat), + mu_init_inc_strat(mu_init_inc_strat), + kang_residual_threshold(kang_residual_threshold) {} + /// @brief Static Constructor to work around GCC Bug + static Parameters Default() { return Parameters(); }; + }; + + /** FIELDS **/ + public: + const Parameters params_; + + /** @brief Params Constructor + * @param params: The parameters to configure the kernel + */ + SIGKernel(const Parameters params = Parameters::Default()) : GraduatedKernel(0.0, 1.0), params_(params) {}; + + /** @brief Individual Parameter Constructor + * @param shape_param: The shape parameter for the kernel + * @param mu_update_strat: The update strategy to use for mu updates + * @param mu_init_inc_strat: The update strategy to use for mu_init increments + */ + SIGKernel(double shape_param, 
MuUpdateStrategy mu_update_strat, MuInitIncrementStrategy mu_init_inc_strat, + std::optional kang_residual_threshold = std::nullopt) + : GraduatedKernel(0.0, 1.0), + params_(Parameters(shape_param, mu_update_strat, mu_init_inc_strat, kang_residual_threshold)) {}; + + /** Interface **/ + public: + /// @brief @see GraduatedKernel + double error(const double &residual, const double &mu) const override; + /// @brief @see GraduatedKernel + double weight(const double &residual, const double &mu) const override; + /// @brief @see GraduatedKernel + double updateMu(const double &mu, const double &residual, const size_t &update_count) const override; + /// @brief @see GraduatedKernel + double incrementMuInit(const double &mu) const override; + /// @brief @see GraduatedKernel + double incrementMuInitInv(const double &mu) const override; + /// @brief @see GraduatedKernel + bool isMuConverged(const double &mu) const override; + + /** @brief Computes a shape param based on an an influence threshold for outliers + * Computes a shape param such that an outlier (any measurement with residual equal to or greater than the + * chi2_outlier_threshold) will have an "influence" (derivative of kernel at mu=1) less than or equal to the + * influence_threshold: d/dr(\rho(r^2)) < influence_thresh + * @param influence_thresh - The max influence permited by an outlier + * @param dof - The degrees of freedom of the corresponding measurement (use for chi2) + * @param chi2_outlier_thresh - The threshold for outlier (i.e. 
0.95 = any measurement with residual greater than 95% + * of expected measurements is an outlier) + * @returns The shape param such that outliers will have inf <= influence_thresh + */ + static double shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh); + + /** @brief Computes a shape param based on dimensionality and a chi2_threshold + * Computes a shape param c = chi2.inv_cdf(dof, thresh) + * @param dof - The degrees of freedom of the corresponding measurement + * @param chi2_threshold - The the threshold at which to compute the shape param value + * @returns The shape param that scales based on dimensionality of the measurement + */ + static double shapeParamFromChi2(size_t dof, double chi2_threshold); + + /** HELPERS **/ + protected: + /// @brief Second derivative of the SIG Kernel wrt r + double secondDerivative(const double &r, const double &mu) const; +}; +} // namespace risam \ No newline at end of file From 5f49b886d48d0f9615f4928d2be90e5d7f465530 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 19 Oct 2025 13:13:25 -0400 Subject: [PATCH 02/17] Reformat initial riSAM code. 
--- gtsam/sam/RISAM.cpp | 188 ++++++++++++++++++----------- gtsam/sam/RISAM.h | 31 ++--- gtsam/sam/RISAMGraduatedFactor.cpp | 14 ++- gtsam/sam/RISAMGraduatedFactor.h | 58 ++++++--- gtsam/sam/RISAMGraduatedKernel.cpp | 115 +++++++++++------- gtsam/sam/RISAMGraduatedKernel.h | 133 ++++++++++++-------- 6 files changed, 336 insertions(+), 203 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index 4367d434b8..920cb11be3 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -16,7 +16,6 @@ * @date October 2025 */ - #include #include @@ -26,23 +25,25 @@ namespace risam { /** - * #### ## ## ######## ######## ######## ######## ### ###### ######## + * #### ## ## ######## ######## ######## ######## ### ###### ######## * ## ### ## ## ## ## ## ## ## ## ## ## ## * ## #### ## ## ## ## ## ## ## ## ## ## * ## ## ## ## ## ###### ######## ###### ## ## ## ###### * ## ## #### ## ## ## ## ## ######### ## ## * ## ## ### ## ## ## ## ## ## ## ## ## ## - * #### ## ## ## ######## ## ## ## ## ## ###### ######## + * #### ## ## ## ######## ## ## ## ## ## ###### ######## */ /*********************************************************************************************************************/ -RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, - const gtsam::FactorIndices& remove_factor_indices, - const boost::optional>& constrained_keys, - const boost::optional>& no_relin_keys, - const boost::optional>& extra_reelim_keys, - bool force_relinearize) { +RISAM::UpdateResult RISAM::update( + const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::FactorIndices& remove_factor_indices, + const boost::optional>& constrained_keys, + const boost::optional>& no_relin_keys, + const boost::optional>& extra_reelim_keys, + bool force_relinearize) { gtsam::ISAM2UpdateParams 
update_params; update_params.removeFactorIndices = remove_factor_indices; update_params.constrainedKeys = constrained_keys; @@ -53,10 +54,13 @@ RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors } /*********************************************************************************************************************/ -RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, - const gtsam::ISAM2UpdateParams& update_params) { - // Determine if the update includes any graduated factors (i.e. potential outliers) +RISAM::UpdateResult RISAM::update( + const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::ISAM2UpdateParams& update_params) { + // Determine if the update includes any graduated factors (i.e. potential + // outliers) bool update_includes_potential_outliers = false; for (auto& factor : new_factors) { auto grad_factor = boost::dynamic_pointer_cast(factor); @@ -66,12 +70,15 @@ RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors // Update housekeeping for any mu_inits updateHouseKeeping(new_factors, update_params); - // Run the update: robust if we have potential outliers (or if requested) otherwise standard iSAM2 + // Run the update: robust if we have potential outliers (or if requested) + // otherwise standard iSAM2 UpdateResult result; if (extra_gnc_involved_keys || update_includes_potential_outliers) { - result = updateRobust(new_factors, new_theta, extra_gnc_involved_keys, update_params); + result = updateRobust(new_factors, new_theta, extra_gnc_involved_keys, + update_params); } else { - result.isam2_result = solver_->update(new_factors, new_theta, update_params); + result.isam2_result = + solver_->update(new_factors, new_theta, update_params); solver_->runBackSubstitution(); } @@ -82,61 +89,80 @@ 
RISAM::UpdateResult RISAM::update(const gtsam::NonlinearFactorGraph& new_factors } /*********************************************************************************************************************/ -RISAM::UpdateResult RISAM::updateRobust(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, - const gtsam::ISAM2UpdateParams& update_params) { +RISAM::UpdateResult RISAM::updateRobust( + const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + const gtsam::ISAM2UpdateParams& update_params) { // Setup the result structure UpdateResult update_result; - // Create params for the initial update, note these may be modified by convexifyInvolvedFactors + // Create params for the initial update, note these may be modified by + // convexifyInvolvedFactors gtsam::ISAM2UpdateParams initial_update_params = update_params; // Convexify involved factors std::set convex_factors = - convexifyInvolvedFactors(new_factors, new_theta, extra_gnc_involved_keys, initial_update_params, update_result); - /// Setup a container to count the number of times mu is updated for each convex factor + convexifyInvolvedFactors(new_factors, new_theta, extra_gnc_involved_keys, + initial_update_params, update_result); + /// Setup a container to count the number of times mu is updated for each + /// convex factor std::map mu_update_count; for (auto& fidx : convex_factors) mu_update_count[fidx] = 0; - // Run the initial update to add new factors, after this the iSAM2 factors/var_index will match the risam copies - update_result.isam2_result = solver_->update(new_factors, new_theta, initial_update_params); + // Run the initial update to add new factors, after this the iSAM2 + // factors/var_index will match the risam copies + update_result.isam2_result = + solver_->update(new_factors, new_theta, initial_update_params); solver_->runBackSubstitution(); // 
Run GNC iterations until all convex factors have converged w.r.t. mu_ - gtsam::FactorIndices remaining_convex_factors(convex_factors.begin(), convex_factors.end()); - gtsam::ISAM2UpdateParams update_params_internal; // Used to force updates convex factors + ensure better ordering + gtsam::FactorIndices remaining_convex_factors(convex_factors.begin(), + convex_factors.end()); + gtsam::ISAM2UpdateParams + update_params_internal; // Used to force updates convex factors + ensure + // better ordering while (remaining_convex_factors.size() > 0) { // Get the current solution gtsam::Values current_est = solver_->calculateEstimate(); // Update mu for all convex factors and get the convex keys gtsam::FastList convexKeys; for (gtsam::FactorIndex fidx : remaining_convex_factors) { - auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); - *(mu_[fidx]) = - grad_factor->kernel()->updateMu(*(mu_[fidx]), grad_factor->residual(current_est), mu_update_count[fidx]); - convexKeys.insert(convexKeys.end(), factors_.at(fidx)->begin(), factors_.at(fidx)->end()); + auto grad_factor = + boost::dynamic_pointer_cast(factors_.at(fidx)); + *(mu_[fidx]) = grad_factor->kernel()->updateMu( + *(mu_[fidx]), grad_factor->residual(current_est), + mu_update_count[fidx]); + convexKeys.insert(convexKeys.end(), factors_.at(fidx)->begin(), + factors_.at(fidx)->end()); mu_update_count[fidx]++; } // Run the Update, re-eliminating the subproblem defined at this time-step - update_params_internal.extraReelimKeys = convexKeys; // Force update to convex factors + update_params_internal.extraReelimKeys = + convexKeys; // Force update to convex factors update_params_internal.constrainedKeys = - gtsam::FastMap(); // Prevent iSAM2 from ordering convex factors first - solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), update_params_internal); + gtsam::FastMap(); // Prevent iSAM2 from ordering + // convex factors first + solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), + 
update_params_internal); solver_->runBackSubstitution(); // Update set of convex Factors gtsam::FactorIndices new_remaining_convex_factors; for (gtsam::FactorIndex fidx : remaining_convex_factors) { - auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); - if (!grad_factor->kernel()->isMuConverged(*(mu_[fidx]))) new_remaining_convex_factors.push_back(fidx); + auto grad_factor = + boost::dynamic_pointer_cast(factors_.at(fidx)); + if (!grad_factor->kernel()->isMuConverged(*(mu_[fidx]))) + new_remaining_convex_factors.push_back(fidx); } remaining_convex_factors = new_remaining_convex_factors; } for (size_t i = 0; i < params_.number_extra_iters; i++) { // Orig RISAM preformed 1 extra iteration - solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), gtsam::ISAM2UpdateParams()); + solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), + gtsam::ISAM2UpdateParams()); solver_->runBackSubstitution(); } @@ -144,10 +170,13 @@ RISAM::UpdateResult RISAM::updateRobust(const gtsam::NonlinearFactorGraph& new_f } /*********************************************************************************************************************/ -gtsam::Values RISAM::calculateEstimate() { return solver_->calculateEstimate(); } +gtsam::Values RISAM::calculateEstimate() { + return solver_->calculateEstimate(); +} /*********************************************************************************************************************/ -void RISAM::forceSetLinearization(const gtsam::Key& key, const gtsam::Value& value) { +void RISAM::forceSetLinearization(const gtsam::Key& key, + const gtsam::Value& value) { solver_->forceSetLinearization(key, value); } @@ -158,7 +187,8 @@ std::set RISAM::getOutliers(double chi2_outlier_thresh) { for (size_t i = 0; i < factors_.size(); i++) { gtsam::NonlinearFactor::shared_ptr nlf_ptr = factors_.at(i); - GraduatedFactor::shared_ptr grad_ptr = boost::dynamic_pointer_cast(nlf_ptr); + GraduatedFactor::shared_ptr grad_ptr = + 
boost::dynamic_pointer_cast(nlf_ptr); if (grad_ptr) { auto dist = boost::math::chi_squared_distribution(nlf_ptr->dim()); const double thresh = boost::math::quantile(dist, chi2_outlier_thresh); @@ -182,17 +212,19 @@ std::set RISAM::getOutliers(double chi2_outlier_thresh) { /*********************************************************************************************************************/ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::ISAM2UpdateParams& update_params) { - // To match the behavior of iSAM2 and ensure matching indices we first add info for new factors then remove info + // To match the behavior of iSAM2 and ensure matching indices we first add + // info for new factors then remove info // Add the factors and get their new indices - gtsam::FactorIndices new_factor_indices = - factors_.add_factors(new_factors, params_.isam2_params.findUnusedFactorSlots); + gtsam::FactorIndices new_factor_indices = factors_.add_factors( + new_factors, params_.isam2_params.findUnusedFactorSlots); // Update the variable index for the new factors variable_index_.augment(new_factors, new_factor_indices); // Add a mu and mu_init entry for each factor augmentMu(new_factors, new_factor_indices); - // Once we have added new factors we can remove any factors and corresponding info + // Once we have added new factors we can remove any factors and corresponding + // info gtsam::NonlinearFactorGraph removed_factors; removed_factors.reserve(update_params.removeFactorIndices.size()); for (const auto fidx : update_params.removeFactorIndices) { @@ -201,15 +233,18 @@ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, mu_.at(fidx).reset(); mu_inits_.at(fidx).reset(); } - variable_index_.remove(update_params.removeFactorIndices.begin(), update_params.removeFactorIndices.end(), + variable_index_.remove(update_params.removeFactorIndices.begin(), + update_params.removeFactorIndices.end(), removed_factors); } 
/*********************************************************************************************************************/ -void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::FactorIndices& new_factor_indices) { +void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::FactorIndices& new_factor_indices) { for (size_t i = 0; i < new_factors.nrFactors(); i++) { gtsam::FactorIndex fidx = new_factor_indices[i]; - auto grad_factor = boost::dynamic_pointer_cast(new_factors.at(i)); + auto grad_factor = + boost::dynamic_pointer_cast(new_factors.at(i)); // Get the pointers to mu and mu_init for this factor std::shared_ptr mu, mu_init; @@ -235,16 +270,23 @@ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, const gtsa /*********************************************************************************************************************/ void RISAM::incrementMuInits() { gtsam::VectorValues delta = solver_->getDelta(); - if (delta.norm() / delta.size() < params_.outlier_mu_avg_var_convergence_thresh) { + if (delta.norm() / delta.size() < + params_.outlier_mu_avg_var_convergence_thresh) { gtsam::Values theta = solver_->calculateEstimate(); for (auto fidx : factors_to_check_status_) { - auto grad_factor = boost::dynamic_pointer_cast(factors_[fidx]); + auto grad_factor = + boost::dynamic_pointer_cast(factors_[fidx]); auto mahdist = grad_factor->residual(theta); - auto dist = boost::math::chi_squared_distribution(factors_[fidx]->dim()); - if (mahdist > boost::math::quantile(dist, params_.outlier_mu_chisq_upper_bound)) { - *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInit(*(mu_inits_[fidx])); - } else if (mahdist < boost::math::quantile(dist, params_.outlier_mu_chisq_lower_bound)) { - *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); + auto dist = + boost::math::chi_squared_distribution(factors_[fidx]->dim()); + if (mahdist > + boost::math::quantile(dist, 
params_.outlier_mu_chisq_upper_bound)) { + *(mu_inits_[fidx]) = + grad_factor->kernel()->incrementMuInit(*(mu_inits_[fidx])); + } else if (mahdist < boost::math::quantile( + dist, params_.outlier_mu_chisq_lower_bound)) { + *(mu_inits_[fidx]) = + grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); } } factors_to_check_status_.clear(); @@ -253,43 +295,53 @@ void RISAM::incrementMuInits() { /*********************************************************************************************************************/ std::set RISAM::convexifyInvolvedFactors( - const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, gtsam::ISAM2UpdateParams& update_params, - UpdateResult& update_result) { - // Gather all involved keys - Involved keys are directly induced by the new factors + const gtsam::NonlinearFactorGraph& new_factors, + const gtsam::Values& new_theta, + const boost::optional> extra_gnc_involved_keys, + gtsam::ISAM2UpdateParams& update_params, UpdateResult& update_result) { + // Gather all involved keys - Involved keys are directly induced by the new + // factors gtsam::KeySet new_factor_keys = new_factors.keys(); gtsam::KeySet involved_keys = solver_->traverseTop(new_factors.keyVector()); involved_keys.insert(new_factor_keys.begin(), new_factor_keys.end()); // Add to the gathered involved keys, any keys specified by the user if (extra_gnc_involved_keys) { - involved_keys.insert(extra_gnc_involved_keys->begin(), extra_gnc_involved_keys->end()); + involved_keys.insert(extra_gnc_involved_keys->begin(), + extra_gnc_involved_keys->end()); // For any user specified involved keys also add them to the extra-reelim if (!update_params.extraReelimKeys) { - update_params.extraReelimKeys = gtsam::FastList(); // create if it does not exist + update_params.extraReelimKeys = + gtsam::FastList(); // create if it does not exist } - update_params.extraReelimKeys->insert(update_params.extraReelimKeys->end(), 
extra_gnc_involved_keys->begin(), + update_params.extraReelimKeys->insert(update_params.extraReelimKeys->end(), + extra_gnc_involved_keys->begin(), extra_gnc_involved_keys->end()); } - // Gather all affected keys - Super set of involved keys including keys modified by user params - auto [affected_keys, is_batch_update] = solver_->preComputeUpdateInfo(new_factors, new_theta, update_params); + // Gather all affected keys - Super set of involved keys including keys + // modified by user params + auto [affected_keys, is_batch_update] = + solver_->preComputeUpdateInfo(new_factors, new_theta, update_params); // Convexify update-involved factors std::set convex_factors; for (gtsam::Key affected_key : affected_keys) { for (gtsam::FactorIndex fidx : variable_index_[affected_key]) { - auto grad_factor = boost::dynamic_pointer_cast(factors_.at(fidx)); + auto grad_factor = + boost::dynamic_pointer_cast(factors_.at(fidx)); if (grad_factor) { - bool inside = true; // indicates that all variables touched by this factor are in the affected set - bool update_involved = false; // indicates a factor touches at least one variable in the involved set + bool inside = true; // indicates that all variables touched by this + // factor are in the affected set + bool update_involved = false; // indicates a factor touches at least + // one variable in the involved set for (gtsam::Key factor_key : factors_.at(fidx)->keys()) { inside = inside && affected_keys.count(factor_key); update_involved = update_involved || involved_keys.count(factor_key); } - // If the factor is update involved and marked as inside, we need to convexify - // Note: everything is inside on a batch update + // If the factor is update involved and marked as inside, we need to + // convexify Note: everything is inside on a batch update if ((inside || is_batch_update) && update_involved) { convex_factors.insert(fidx); factors_to_check_status_.insert(fidx); diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index 
8823405080..5f5cbbd7f9 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -9,7 +9,7 @@ * -------------------------------------------------------------------------- */ -/** +/** * @file RISAM.h * @brief Robust Incremental Smoothing and Mapping (riSAM) * @date October 2025 @@ -137,19 +137,14 @@ class RISAM { /// performs a robust update. All extra involved keys will be treated as being /// apart of the current update wrt factor convexification UpdateResult update( - const NonlinearFactorGraph& new_factors = - NonlinearFactorGraph(), + const NonlinearFactorGraph& new_factors = NonlinearFactorGraph(), const Values& new_theta = Values(), const boost::optional> extra_gnc_involved_keys = boost::none, - const FactorIndices& remove_factor_indices = - FactorIndices(), - const boost::optional>& constrained_keys = - boost::none, - const boost::optional>& no_relin_keys = - boost::none, - const boost::optional>& extra_reelim_keys = - boost::none, + const FactorIndices& remove_factor_indices = FactorIndices(), + const boost::optional>& constrained_keys = boost::none, + const boost::optional>& no_relin_keys = boost::none, + const boost::optional>& extra_reelim_keys = boost::none, bool force_relinearize = false); /// @brief Update Interface. 
See ISAM2 docs for details as parameters match @@ -157,8 +152,7 @@ class RISAM { /// @param force_gnc_solve - overrides internal RISAM logic and performs a /// robust update UpdateResult update( - const NonlinearFactorGraph& new_factors, - const Values& new_theta, + const NonlinearFactorGraph& new_factors, const Values& new_theta, const boost::optional> extra_gnc_involved_keys, const ISAM2UpdateParams& update_params); @@ -197,8 +191,7 @@ class RISAM { * @see ISAM2::update for details on params */ UpdateResult updateRobust( - const NonlinearFactorGraph& new_factors, - const Values& new_theta, + const NonlinearFactorGraph& new_factors, const Values& new_theta, const boost::optional> extra_gnc_involved_keys, const ISAM2UpdateParams& update_params); @@ -239,11 +232,9 @@ class RISAM { * the given parameters. */ std::set convexifyInvolvedFactors( - const NonlinearFactorGraph& new_factors, - const Values& new_theta, + const NonlinearFactorGraph& new_factors, const Values& new_theta, const boost::optional> extra_gnc_involved_keys, - ISAM2UpdateParams& internal_update_params, - UpdateResult& update_result); + ISAM2UpdateParams& internal_update_params, UpdateResult& update_result); }; -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.cpp b/gtsam/sam/RISAMGraduatedFactor.cpp index e31d83098b..aef553906e 100644 --- a/gtsam/sam/RISAMGraduatedFactor.cpp +++ b/gtsam/sam/RISAMGraduatedFactor.cpp @@ -9,25 +9,29 @@ * -------------------------------------------------------------------------- */ - #include "risam/GraduatedFactor.h" namespace risam { /* ************************************************************************* */ -GraduatedFactor::GraduatedFactor(GraduatedKernel::shared_ptr kernel) : kernel_(kernel) { +GraduatedFactor::GraduatedFactor(GraduatedKernel::shared_ptr kernel) + : kernel_(kernel) { mu_ = std::make_shared(kernel_->muInit()); } /* 
************************************************************************* */ -GraduatedFactor::GraduatedFactor(const GraduatedFactor& other) : kernel_(other.kernel_) { +GraduatedFactor::GraduatedFactor(const GraduatedFactor& other) + : kernel_(other.kernel_) { mu_ = std::make_shared(*(other.mu_)); } /* ************************************************************************* */ -const GraduatedKernel::shared_ptr GraduatedFactor::kernel() const { return kernel_; } +const GraduatedKernel::shared_ptr GraduatedFactor::kernel() const { + return kernel_; +} /* ************************************************************************* */ -void GraduatedFactor::updateKernel(const GraduatedKernel::shared_ptr& new_kernel) { +void GraduatedFactor::updateKernel( + const GraduatedKernel::shared_ptr& new_kernel) { kernel_ = new_kernel; *mu_ = new_kernel->muInit(); } diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index 76212b819f..38dbe65c1d 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -9,10 +9,9 @@ * -------------------------------------------------------------------------- */ - /** @brief Factor that implements a graduated robust cost function. - * A Graduated factor overrides a portion of the NoiseModelFactor interface to implement a graduated robust cost - * function for the factor. + * A Graduated factor overrides a portion of the NoiseModelFactor interface to + * implement a graduated robust cost function for the factor. 
* * @author Dan McGann * @date Mar 2022 @@ -50,17 +49,22 @@ class GraduatedFactor { /// @brief Copy constructor GraduatedFactor(const GraduatedFactor& other); - /** @brief Linearize this factor at a specific point, using the specified convexification parameter mu - * @param current_estimate: the variable estimate at which to linearize the Factor - * @param mu: the current value of the convexification parameter for this factor + /** @brief Linearize this factor at a specific point, using the specified + * convexification parameter mu + * @param current_estimate: the variable estimate at which to linearize the + * Factor + * @param mu: the current value of the convexification parameter for this + * factor */ - virtual gtsam::GaussianFactor::shared_ptr linearizeRobust(const gtsam::Values& current_estimate) const = 0; + virtual gtsam::GaussianFactor::shared_ptr linearizeRobust( + const gtsam::Values& current_estimate) const = 0; /// @brief returns the residual of the factor virtual double residual(const gtsam::Values& current_estimate) const = 0; /// @brief returns \rho(r) of the factor - virtual double robustResidual(const gtsam::Values& current_estimate) const = 0; + virtual double robustResidual( + const gtsam::Values& current_estimate) const = 0; /// @brief Returns the value of \mu_{init} for this graduated kernel const GraduatedKernel::shared_ptr kernel() const; @@ -69,14 +73,16 @@ class GraduatedFactor { /// WARN: Resets mu_ to the mu_init of the kernel void updateKernel(const GraduatedKernel::shared_ptr& new_kernel); - /// @brief Copies this factor as an instance of its base type without the graduated kernel + /// @brief Copies this factor as an instance of its base type without the + /// graduated kernel virtual gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const = 0; }; template class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { static_assert(std::is_base_of::value, - "GraduatedFactor Must be instantiated with a Factor Derived 
from gtsam::NonlinearFactor."); + "GraduatedFactor Must be instantiated with a Factor Derived " + "from gtsam::NonlinearFactor."); /** TYPES **/ public: @@ -98,18 +104,23 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /// @brief makes a deep copy gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( - gtsam::NonlinearFactor::shared_ptr(new GenericGraduatedFactor(*this))); + gtsam::NonlinearFactor::shared_ptr( + new GenericGraduatedFactor(*this))); } - gtsam::GaussianFactor::shared_ptr linearize(const gtsam::Values& current_estimate) const override { + gtsam::GaussianFactor::shared_ptr linearize( + const gtsam::Values& current_estimate) const override { return linearizeRobust(current_estimate); } /// @brief For graduated factors return 0.5 \rho(r)^2 - double error(const gtsam::Values& values) const override { return 0.5 * std::pow(robustResidual(values), 2); } + double error(const gtsam::Values& values) const override { + return 0.5 * std::pow(robustResidual(values), 2); + } /** GRADUATED INTERFACE **/ - gtsam::GaussianFactor::shared_ptr linearizeRobust(const gtsam::Values& current_estimate) const override { + gtsam::GaussianFactor::shared_ptr linearizeRobust( + const gtsam::Values& current_estimate) const override { double r = residual(current_estimate); gtsam::Matrix A; @@ -140,18 +151,23 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { return boost::make_shared(Ablock_map, b); } - /* ************************************************************************* */ + /* ************************************************************************* + */ double residual(const gtsam::Values& current_estimate) const override { return sqrt(2.0 * FACTOR_TYPE::error(current_estimate)); } - /* ************************************************************************* */ + /* ************************************************************************* + */ double robustResidual(const 
gtsam::Values& current_estimate) const override { return kernel_->error(residual(current_estimate), *mu_); } - /* ************************************************************************* */ - gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const override { return FACTOR_TYPE::clone(); } + /* ************************************************************************* + */ + gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const override { + return FACTOR_TYPE::clone(); + } }; /** HELPERS **/ @@ -161,7 +177,9 @@ GenericGraduatedFactor make_graduated(Args&&... args) { } template -typename GenericGraduatedFactor::shared_ptr make_shared_graduated(Args&&... args) { - return boost::make_shared>(std::forward(args)...); +typename GenericGraduatedFactor::shared_ptr make_shared_graduated( + Args&&... args) { + return boost::make_shared>( + std::forward(args)...); } } // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp index a84ec00f1a..f901e2243d 100644 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -1,19 +1,18 @@ -#include "risam/GraduatedKernel.h" - #include +#include "risam/GraduatedKernel.h" #include "risam/Utilities.h" namespace risam { /*********************************************************************************************************************/ -double SIGKernel::error(const double &residual, const double &mu) const { +double SIGKernel::error(const double& residual, const double& mu) const { double r2 = residual * residual; double c2 = params_.shape_param * params_.shape_param; return 0.5 * (c2 * r2) / (c2 + pow(r2, mu)); } /*********************************************************************************************************************/ -double SIGKernel::weight(const double &residual, const double &mu) const { +double SIGKernel::weight(const double& residual, const double& mu) const { double r2 = residual * residual; double c2 = 
params_.shape_param * params_.shape_param; double sqrt_denom = c2 + pow(r2, mu); @@ -21,41 +20,58 @@ double SIGKernel::weight(const double &residual, const double &mu) const { } /*********************************************************************************************************************/ -double SIGKernel::updateMu(const double &mu, const double &residual, const size_t &update_count) const { +double SIGKernel::updateMu(const double& mu, const double& residual, + const size_t& update_count) const { if (params_.mu_update_strat == MuUpdateStrategy::MCGANN_2023) { // Mu update empirically discovered and presented in the orig riSAM paper return std::min(1.0, mu + (mu + 0.1) * 1.2); } else if (params_.mu_update_strat == MuUpdateStrategy::SIMPLE_HUBER) { - // Simple mu update of [mu_init, 0.5, 1.0] where 0.5 results in approximately the huber kernel + // Simple mu update of [mu_init, 0.5, 1.0] where 0.5 results in + // approximately the huber kernel return mu < 0.5 ? 0.5 : 1.0; } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_CODE_2023) { - // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for the current residual + // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for + // the current residual if (!params_.kang_residual_threshold) { - throw std::runtime_error("mu_update_strat == KANG_CODE_2023 but no kang_residual_threshold provided in params"); + throw std::runtime_error( + "mu_update_strat == KANG_CODE_2023 but no kang_residual_threshold " + "provided in params"); } if (update_count == 0 && residual > params_.shape_param / std::sqrt(3) && residual < (*params_.kang_residual_threshold)) { double mu_inflection = bisectionMuSearch( - [residual, this](double new_mu) { return secondDerivative(residual, new_mu); }, 0.5, 1.0, 1e-9); - // If the inflection point is less than the current mu_init skip to mu_final + [residual, this](double new_mu) { + return secondDerivative(residual, new_mu); + }, + 0.5, 1.0, 1e-9); + 
// If the inflection point is less than the current mu_init skip to + // mu_final return mu_inflection <= mu ? 1.0 : mu_inflection; } else { - // If update_count > 0 then we have already run the mu* update, so return 1.0 - // If r < c/sqrt(3) then the result will be > 1.0 so skip search and return 1.0 - // If r > kang_residual_threshold after the convex iter then this measurement is most def an outlier so go to 1.0 - // Note: This final condition is present in the code, but not in the paper by Kang et al. + // If update_count > 0 then we have already run the mu* update, so + // return 1.0 If r < c/sqrt(3) then the result will be > 1.0 so skip + // search and return 1.0 If r > kang_residual_threshold after the convex + // iter then this measurement is most def an outlier so go to 1.0 Note: + // This final condition is present in the code, but not in the paper by + // Kang et al. return 1.0; } } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_PAPER_2023) { - // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for the current residual + // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for + // the current residual if (update_count == 0 && residual > params_.shape_param / std::sqrt(3)) { double mu_inflection = bisectionMuSearch( - [residual, this](double new_mu) { return secondDerivative(residual, new_mu); }, 0.5, 1.0, 1e-9); - // If the inflection point is less than the current mu_init skip to mu_final + [residual, this](double new_mu) { + return secondDerivative(residual, new_mu); + }, + 0.5, 1.0, 1e-9); + // If the inflection point is less than the current mu_init skip to + // mu_final return mu_inflection <= mu ? 
1.0 : mu_inflection; } else { - // If update_count > 0 then we have already run the mu* update, so return 1.0 - // If r < c/sqrt(3) then the result will be > 1.0 so skip search and return 1.0 + // If update_count > 0 then we have already run the mu* update, so + // return 1.0 If r < c/sqrt(3) then the result will be > 1.0 so skip + // search and return 1.0 return 1.0; } } else if (params_.mu_update_strat == MuUpdateStrategy::ONESTEP) { @@ -71,28 +87,33 @@ double SIGKernel::updateMu(const double &mu, const double &residual, const size_ // Arithmetic Series: mu[t+1] = mu[t] + (1/4) return std::min(1.0, mu + 0.25); } else if (params_.mu_update_strat == MuUpdateStrategy::ASYM_FOURTHS) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] + 0.25 + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] + // + 0.25 if (update_count == 0) return std::min(1.0, mu + 0.5); return std::min(1.0, mu + 0.25); } else if (params_.mu_update_strat == MuUpdateStrategy::NONCONVEX) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) else mu[t] + 0.2 + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) else mu[t] + // + 0.2 if (update_count == 0) return std::min(1.0, mu + 0.7); return std::min(1.0, mu + 0.2); } else if (params_.mu_update_strat == MuUpdateStrategy::STABLE) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + 0.4 if (t==1), else mu[t] + 0.05 - // My attempt to build the most stable method possible given lessons learned from multi-robot conditions + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + + // 0.4 if (t==1), else mu[t] + 0.05 My attempt to build the most stable + // method possible given lessons learned from multi-robot conditions if (update_count == 0) return std::min(1.0, mu + 0.5); if (update_count == 1) return std::min(1.0, mu + 0.4); return std::min(1.0, mu + 0.05); } else { - throw std::runtime_error("updateMu: requested 
MuUpdateStrategy is not implemented."); + throw std::runtime_error( + "updateMu: requested MuUpdateStrategy is not implemented."); } } /*********************************************************************************************************************/ -double SIGKernel::incrementMuInit(const double &mu) const { +double SIGKernel::incrementMuInit(const double& mu) const { if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { - // Mu init update Empirically discovered and presented in the orig riSAM paper [Non-Symmetric] + // Mu init update Empirically discovered and presented in the orig riSAM + // paper [Non-Symmetric] return std::min(1.0, mu + (mu + 0.1) * 1.2); } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { // Simple update with 3 steps from init to final @@ -113,14 +134,17 @@ double SIGKernel::incrementMuInit(const double &mu) const { // Simple update with 50 steps from init to final return std::min(1.0, mu + 0.02); } else { - throw std::runtime_error("incrementMuInit: requested MuInitIncrementStrategy is not implemented."); + throw std::runtime_error( + "incrementMuInit: requested MuInitIncrementStrategy is not " + "implemented."); } } /*********************************************************************************************************************/ -double SIGKernel::incrementMuInitInv(const double &mu) const { +double SIGKernel::incrementMuInitInv(const double& mu) const { if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { - // Mu init update Empirically discovered and presented in the orig riSAM paper [Non-Symmetric] + // Mu init update Empirically discovered and presented in the orig riSAM + // paper [Non-Symmetric] return std::max(0.0, mu - 0.1); } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { // Simple Update with 3 steps from init to final @@ -141,19 +165,26 @@ double SIGKernel::incrementMuInitInv(const double &mu) const { // Simple update with 50 
steps from init to final return std::max(0.0, mu - 0.02); } else { - throw std::runtime_error("incrementMuInitInv: requested MuInitIncrementStrategy is not implemented."); + throw std::runtime_error( + "incrementMuInitInv: requested MuInitIncrementStrategy is not " + "implemented."); } } /*********************************************************************************************************************/ -bool SIGKernel::isMuConverged(const double &mu) const { return mu >= convergence_thresh_; } +bool SIGKernel::isMuConverged(const double& mu) const { + return mu >= convergence_thresh_; +} /*********************************************************************************************************************/ -double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh) { - const double outlier_residual_thresh = - boost::math::quantile(boost::math::chi_squared_distribution(dof), chi2_outlier_thresh); - // Equation from taking derivative of SIGKernel setting equal to influence_thresh and solving for mu=1 - const double t1 = std::sqrt(2 * influence_thresh * std::pow(outlier_residual_thresh, 5)); +double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, + double chi2_outlier_thresh) { + const double outlier_residual_thresh = boost::math::quantile( + boost::math::chi_squared_distribution(dof), chi2_outlier_thresh); + // Equation from taking derivative of SIGKernel setting equal to + // influence_thresh and solving for mu=1 + const double t1 = + std::sqrt(2 * influence_thresh * std::pow(outlier_residual_thresh, 5)); const double t2 = influence_thresh * std::pow(outlier_residual_thresh, 2); const double t3 = influence_thresh - 2 * outlier_residual_thresh; return std::sqrt(-((t1 + t2) / t3)); @@ -161,20 +192,22 @@ double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, d 
/*********************************************************************************************************************/ double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { - return boost::math::quantile(boost::math::chi_squared_distribution(dof), chi2_threshold); + return boost::math::quantile( + boost::math::chi_squared_distribution(dof), chi2_threshold); } /*********************************************************************************************************************/ -double SIGKernel::secondDerivative(const double &r, const double &mu) const { +double SIGKernel::secondDerivative(const double& r, const double& mu) const { // Setup helpful values double c2 = params_.shape_param * params_.shape_param; double c4 = c2 * c2; double c6 = c4 * c2; // Calculate - double numerator = c6 - // Constant - (c4 * (mu + 2) * (2 * mu - 1) * std::pow(r, 2 * mu)) + // term 1 - (c2 * (mu - 1) * (2 * mu - 1) * std::pow(r, 4 * mu)); // term 2 + double numerator = + c6 - // Constant + (c4 * (mu + 2) * (2 * mu - 1) * std::pow(r, 2 * mu)) + // term 1 + (c2 * (mu - 1) * (2 * mu - 1) * std::pow(r, 4 * mu)); // term 2 double denominator = std::pow(c2 + std::pow(r, 2 * mu), 3); return numerator / denominator; diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index 2c25a4c41b..1dd194b9fb 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -1,11 +1,13 @@ /** @brief Interface for a graduated robust kernel. - * That is a robust kernel \rho(r_i, \mu) that varies from highly convex (quadratic) when \mu = \mu_{init} to non-convex - * (robust cost) as \mu trends towards \mu_{final} + * That is a robust kernel \rho(r_i, \mu) that varies from highly convex + * (quadratic) when \mu = \mu_{init} to non-convex (robust cost) as \mu trends + * towards \mu_{final} * * * Citations: - * [2] - W. Kang et al. 
"Efficient Graduated Non-Convexity for Pose Graph Optimization", International Conference on - * Control, Automation and Systems (ICCAS), 2024 + * [2] - W. Kang et al. "Efficient Graduated Non-Convexity for Pose Graph + * Optimization", International Conference on Control, Automation and Systems + * (ICCAS), 2024 * * @author Dan McGann * @date Mar 2022 @@ -41,7 +43,7 @@ class GraduatedKernel { * @param residual: r_i the current (whitened) residual * @param mu: \mu the current convexity parameter */ - virtual double error(const double &residual, const double &mu) const = 0; + virtual double error(const double& residual, const double& mu) const = 0; /** @brief Computes the weight of the graduated robust cost function * weight = w(r_i) @@ -49,17 +51,20 @@ class GraduatedKernel { * @param residual: r_i the current (whitened) residual * @param mu: \mu the current convexity parameter */ - virtual double weight(const double &residual, const double &mu) const = 0; + virtual double weight(const double& residual, const double& mu) const = 0; /** @brief Weights a linearized system - * Weight results in the solving of: w_i * r_i^2 = \sqrt(w_i) (A_i x + b_i) = \sqrt(w_i)* A_i x + \sqrt(w_i) * b_i + * Weight results in the solving of: w_i * r_i^2 = \sqrt(w_i) (A_i x + b_i) = + * \sqrt(w_i)* A_i x + \sqrt(w_i) * b_i * * @param A: Whitened linear system from a factor (\Sigma^{-0.5} H_i) - * @param b: Whitened linear system error from a factor (0.5 * \Sigma^(-0.5) ||h_i(x_i) - z_i||^2 = 0.5 * r_i^2) + * @param b: Whitened linear system error from a factor (0.5 * \Sigma^(-0.5) + * ||h_i(x_i) - z_i||^2 = 0.5 * r_i^2) */ - void weightSystem(std::vector &A, gtsam::Vector &b, const double &residual, const double &mu) const { + void weightSystem(std::vector& A, gtsam::Vector& b, + const double& residual, const double& mu) const { double sqrt_weight = sqrt(weight(residual, mu)); - for (gtsam::Matrix &Aj : A) { + for (gtsam::Matrix& Aj : A) { Aj *= sqrt_weight; } b *= sqrt_weight; @@ 
-71,17 +76,21 @@ class GraduatedKernel { /// @brief Returns the next value of \mu for this graduated kernel /// @param mu: The current value of mu /// @param residual: The current residual of the factor - /// @param update_count: The number of mu updates during this graduated solve 0 = just made convex - virtual double updateMu(const double &mu, const double &residual, const size_t &update_count) const = 0; + /// @param update_count: The number of mu updates during this graduated solve + /// 0 = just made convex + virtual double updateMu(const double& mu, const double& residual, + const size_t& update_count) const = 0; - /// @brief Returns an incremented mu_init, used to decrease the influence of known outliers - virtual double incrementMuInit(const double &mu_init) const = 0; + /// @brief Returns an incremented mu_init, used to decrease the influence of + /// known outliers + virtual double incrementMuInit(const double& mu_init) const = 0; /// @brief Returns the previous value of mu_init for this graduated kernel - virtual double incrementMuInitInv(const double &mu) const = 0; + virtual double incrementMuInitInv(const double& mu) const = 0; - /// @brief Returns true iff the value of \mu has converged to a non-convex state - virtual bool isMuConverged(const double &mu) const = 0; + /// @brief Returns true iff the value of \mu has converged to a non-convex + /// state + virtual bool isMuConverged(const double& mu) const = 0; }; /* ************************************************************************* */ @@ -95,20 +104,33 @@ class SIGKernel : public GraduatedKernel { /// @brief Strategies for updating Mu between GNC iterations of riSAM enum class MuUpdateStrategy { - MCGANN_2023, // Geometric Series: mu[t+1] = mu[t] + (mu[t] + 0.1) * 1.2 - SIMPLE_HUBER, // Fixed Pattern [mu_init, 0.5, 1.0] - KANG_CODE_2023, // Sequence: [mu_init, mu*, 1.0] with early outlier rejection for r > kang_residual_threshold [2] - KANG_PAPER_2023, // Sequence: [mu_init, mu*, 1.0] (no early 
outlier rejection was presented in paper) [2] + MCGANN_2023, // Geometric Series: mu[t+1] = mu[t] + (mu[t] + 0.1) * 1.2 + SIMPLE_HUBER, // Fixed Pattern [mu_init, 0.5, 1.0] + KANG_CODE_2023, // Sequence: [mu_init, mu*, 1.0] with early outlier + // rejection for r > kang_residual_threshold [2] + KANG_PAPER_2023, // Sequence: [mu_init, mu*, 1.0] (no early outlier + // rejection was presented in paper) [2] ONESTEP, // Jump to mu=1.0 after initial convex step HALVES, // Arithmetic Series: mu[t+1] = mu[t] + (1/2) THIRDS, // Arithmetic Series: mu[t+1] = mu[t] + (1/3) FOURTHS, // Arithmetic Series: mu[t+1] = mu[t] + (1/4) - ASYM_FOURTHS, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] + 0.25 - NONCONVEX, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) else mu[t] + 0.2 - STABLE // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + 0.4 if (t==1), else mu[t] + 0.05 + ASYM_FOURTHS, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if + // (t==0) else mu[t] + 0.25 + NONCONVEX, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if + // (t==0) else mu[t] + 0.2 + STABLE // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), + // mu[t] + 0.4 if (t==1), else mu[t] + 0.05 }; /// @brief Strategies for update Mu_init after convergence - enum class MuInitIncrementStrategy { MCGANN_2023, EQUAL_3, EQUAL_4, EQUAL_5, EQUAL_10, EQUAL_20, EQUAL_50 }; + enum class MuInitIncrementStrategy { + MCGANN_2023, + EQUAL_3, + EQUAL_4, + EQUAL_5, + EQUAL_10, + EQUAL_20, + EQUAL_50 + }; struct Parameters { /// @brief The shape parameter of the GM kernel. 
@@ -116,16 +138,18 @@ class SIGKernel : public GraduatedKernel { /// @brief The update strategy to use for mu updates MuUpdateStrategy mu_update_strat{MuUpdateStrategy::MCGANN_2023}; /// @brief The update strategy to use for mu_init increments - MuInitIncrementStrategy mu_init_inc_strat{MuInitIncrementStrategy::MCGANN_2023}; + MuInitIncrementStrategy mu_init_inc_strat{ + MuInitIncrementStrategy::MCGANN_2023}; - /// @brief The residual threshold use to skip to mu=1.0 in the provided for [2] - /// Required only if mu_update_strat == KANG_CODE_2023 + /// @brief The residual threshold use to skip to mu=1.0 in the provided for + /// [2] Required only if mu_update_strat == KANG_CODE_2023 std::optional kang_residual_threshold{std::nullopt}; /// @brief Default Constructor explicit Parameters() = default; /// @brief Parameterized Constructor - Parameters(double shape_param, MuUpdateStrategy mu_update_strat, MuInitIncrementStrategy mu_init_inc_strat, + Parameters(double shape_param, MuUpdateStrategy mu_update_strat, + MuInitIncrementStrategy mu_init_inc_strat, std::optional kang_residual_threshold = std::nullopt) : shape_param(shape_param), mu_update_strat(mu_update_strat), @@ -142,56 +166,67 @@ class SIGKernel : public GraduatedKernel { /** @brief Params Constructor * @param params: The parameters to configure the kernel */ - SIGKernel(const Parameters params = Parameters::Default()) : GraduatedKernel(0.0, 1.0), params_(params) {}; + SIGKernel(const Parameters params = Parameters::Default()) + : GraduatedKernel(0.0, 1.0), params_(params) {}; /** @brief Individual Parameter Constructor * @param shape_param: The shape parameter for the kernel * @param mu_update_strat: The update strategy to use for mu updates * @param mu_init_inc_strat: The update strategy to use for mu_init increments */ - SIGKernel(double shape_param, MuUpdateStrategy mu_update_strat, MuInitIncrementStrategy mu_init_inc_strat, + SIGKernel(double shape_param, MuUpdateStrategy mu_update_strat, + 
MuInitIncrementStrategy mu_init_inc_strat, std::optional kang_residual_threshold = std::nullopt) : GraduatedKernel(0.0, 1.0), - params_(Parameters(shape_param, mu_update_strat, mu_init_inc_strat, kang_residual_threshold)) {}; + params_(Parameters(shape_param, mu_update_strat, mu_init_inc_strat, + kang_residual_threshold)) {}; /** Interface **/ public: /// @brief @see GraduatedKernel - double error(const double &residual, const double &mu) const override; + double error(const double& residual, const double& mu) const override; /// @brief @see GraduatedKernel - double weight(const double &residual, const double &mu) const override; + double weight(const double& residual, const double& mu) const override; /// @brief @see GraduatedKernel - double updateMu(const double &mu, const double &residual, const size_t &update_count) const override; + double updateMu(const double& mu, const double& residual, + const size_t& update_count) const override; /// @brief @see GraduatedKernel - double incrementMuInit(const double &mu) const override; + double incrementMuInit(const double& mu) const override; /// @brief @see GraduatedKernel - double incrementMuInitInv(const double &mu) const override; + double incrementMuInitInv(const double& mu) const override; /// @brief @see GraduatedKernel - bool isMuConverged(const double &mu) const override; + bool isMuConverged(const double& mu) const override; - /** @brief Computes a shape param based on an an influence threshold for outliers - * Computes a shape param such that an outlier (any measurement with residual equal to or greater than the - * chi2_outlier_threshold) will have an "influence" (derivative of kernel at mu=1) less than or equal to the + /** @brief Computes a shape param based on an an influence threshold for + * outliers Computes a shape param such that an outlier (any measurement with + * residual equal to or greater than the chi2_outlier_threshold) will have an + * "influence" (derivative of kernel at mu=1) less than or 
equal to the * influence_threshold: d/dr(\rho(r^2)) < influence_thresh * @param influence_thresh - The max influence permited by an outlier - * @param dof - The degrees of freedom of the corresponding measurement (use for chi2) - * @param chi2_outlier_thresh - The threshold for outlier (i.e. 0.95 = any measurement with residual greater than 95% - * of expected measurements is an outlier) - * @returns The shape param such that outliers will have inf <= influence_thresh + * @param dof - The degrees of freedom of the corresponding measurement (use + * for chi2) + * @param chi2_outlier_thresh - The threshold for outlier (i.e. 0.95 = any + * measurement with residual greater than 95% of expected measurements is an + * outlier) + * @returns The shape param such that outliers will have inf <= + * influence_thresh */ - static double shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh); + static double shapeParamFromInfThresh(double influence_thresh, size_t dof, + double chi2_outlier_thresh); /** @brief Computes a shape param based on dimensionality and a chi2_threshold * Computes a shape param c = chi2.inv_cdf(dof, thresh) * @param dof - The degrees of freedom of the corresponding measurement - * @param chi2_threshold - The the threshold at which to compute the shape param value - * @returns The shape param that scales based on dimensionality of the measurement + * @param chi2_threshold - The the threshold at which to compute the shape + * param value + * @returns The shape param that scales based on dimensionality of the + * measurement */ static double shapeParamFromChi2(size_t dof, double chi2_threshold); /** HELPERS **/ protected: /// @brief Second derivative of the SIG Kernel wrt r - double secondDerivative(const double &r, const double &mu) const; + double secondDerivative(const double& r, const double& mu) const; }; } // namespace risam \ No newline at end of file From 1ed4be25e838a71c6e5a2ff9067a02765824e8db Mon Sep 17 00:00:00 
2001 From: Dan McGann Date: Mon, 22 Dec 2025 12:02:50 -0500 Subject: [PATCH 03/17] Format + Fix Compilation - Formats riSAM code to gtsam standard - Switches to use std rather than boost to meet gtsam 4.3 convention - Removes some unneeded functions --- gtsam/sam/RISAM.cpp | 112 +++++++++++------------------ gtsam/sam/RISAM.h | 49 +++++-------- gtsam/sam/RISAMGraduatedFactor.cpp | 4 +- gtsam/sam/RISAMGraduatedFactor.h | 16 ++--- gtsam/sam/RISAMGraduatedKernel.cpp | 62 ++++++++-------- gtsam/sam/RISAMGraduatedKernel.h | 10 +-- 6 files changed, 107 insertions(+), 146 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index 920cb11be3..a3cc0868f3 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -13,36 +13,23 @@ * @file RISAM.cpp * @brief Implementation of the RISAM algorithm. * @author Dan McGann - * @date October 2025 + * @date October 2025 */ #include +#include -#include +namespace gtsam { -#include "risam/GraduatedFactor.h" - -namespace risam { - -/** - * #### ## ## ######## ######## ######## ######## ### ###### ######## - * ## ### ## ## ## ## ## ## ## ## ## ## ## - * ## #### ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ###### ######## ###### ## ## ## ###### - * ## ## #### ## ## ## ## ## ######### ## ## - * ## ## ### ## ## ## ## ## ## ## ## ## ## - * #### ## ## ## ######## ## ## ## ## ## ###### ######## - */ - -/*********************************************************************************************************************/ +/* ************************************************************************* */ RISAM::UpdateResult RISAM::update( const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, const gtsam::FactorIndices& remove_factor_indices, - const boost::optional>& constrained_keys, - const boost::optional>& no_relin_keys, - const boost::optional>& extra_reelim_keys, + const std::optional>& 
constrained_keys, + const std::optional>& no_relin_keys, + const std::optional>& extra_reelim_keys, bool force_relinearize) { gtsam::ISAM2UpdateParams update_params; update_params.removeFactorIndices = remove_factor_indices; @@ -53,17 +40,17 @@ RISAM::UpdateResult RISAM::update( return update(new_factors, new_theta, extra_gnc_involved_keys, update_params); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ RISAM::UpdateResult RISAM::update( const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, const gtsam::ISAM2UpdateParams& update_params) { // Determine if the update includes any graduated factors (i.e. potential // outliers) bool update_includes_potential_outliers = false; for (auto& factor : new_factors) { - auto grad_factor = boost::dynamic_pointer_cast(factor); + auto grad_factor = std::dynamic_pointer_cast(factor); if (grad_factor) update_includes_potential_outliers = true; } @@ -79,7 +66,7 @@ RISAM::UpdateResult RISAM::update( } else { result.isam2_result = solver_->update(new_factors, new_theta, update_params); - solver_->runBackSubstitution(); + solver_->calculateEstimate(); } // If we have converged update mu_inits_based on status @@ -88,11 +75,11 @@ RISAM::UpdateResult RISAM::update( return result; } -/*********************************************************************************************************************/ +/* ************************************************************************* */ RISAM::UpdateResult RISAM::updateRobust( const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, const gtsam::ISAM2UpdateParams& update_params) { // Setup the result 
structure UpdateResult update_result; @@ -114,7 +101,7 @@ RISAM::UpdateResult RISAM::updateRobust( // factors/var_index will match the risam copies update_result.isam2_result = solver_->update(new_factors, new_theta, initial_update_params); - solver_->runBackSubstitution(); + solver_->calculateEstimate(); // Run GNC iterations until all convex factors have converged w.r.t. mu_ gtsam::FactorIndices remaining_convex_factors(convex_factors.begin(), @@ -129,7 +116,7 @@ RISAM::UpdateResult RISAM::updateRobust( gtsam::FastList convexKeys; for (gtsam::FactorIndex fidx : remaining_convex_factors) { auto grad_factor = - boost::dynamic_pointer_cast(factors_.at(fidx)); + std::dynamic_pointer_cast(factors_.at(fidx)); *(mu_[fidx]) = grad_factor->kernel()->updateMu( *(mu_[fidx]), grad_factor->residual(current_est), mu_update_count[fidx]); @@ -146,13 +133,13 @@ RISAM::UpdateResult RISAM::updateRobust( // convex factors first solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), update_params_internal); - solver_->runBackSubstitution(); + solver_->calculateEstimate(); // Update set of convex Factors gtsam::FactorIndices new_remaining_convex_factors; for (gtsam::FactorIndex fidx : remaining_convex_factors) { auto grad_factor = - boost::dynamic_pointer_cast(factors_.at(fidx)); + std::dynamic_pointer_cast(factors_.at(fidx)); if (!grad_factor->kernel()->isMuConverged(*(mu_[fidx]))) new_remaining_convex_factors.push_back(fidx); } @@ -163,24 +150,18 @@ RISAM::UpdateResult RISAM::updateRobust( // Orig RISAM preformed 1 extra iteration solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), gtsam::ISAM2UpdateParams()); - solver_->runBackSubstitution(); + solver_->calculateEstimate(); } return update_result; } -/*********************************************************************************************************************/ +/* ************************************************************************* */ gtsam::Values RISAM::calculateEstimate() { return 
solver_->calculateEstimate(); } -/*********************************************************************************************************************/ -void RISAM::forceSetLinearization(const gtsam::Key& key, - const gtsam::Value& value) { - solver_->forceSetLinearization(key, value); -} - -/*********************************************************************************************************************/ +/* ************************************************************************* */ std::set RISAM::getOutliers(double chi2_outlier_thresh) { std::set outlier_factors; gtsam::Values current_est = solver_->calculateEstimate(); @@ -188,10 +169,10 @@ std::set RISAM::getOutliers(double chi2_outlier_thresh) { for (size_t i = 0; i < factors_.size(); i++) { gtsam::NonlinearFactor::shared_ptr nlf_ptr = factors_.at(i); GraduatedFactor::shared_ptr grad_ptr = - boost::dynamic_pointer_cast(nlf_ptr); + std::dynamic_pointer_cast(nlf_ptr); if (grad_ptr) { - auto dist = boost::math::chi_squared_distribution(nlf_ptr->dim()); - const double thresh = boost::math::quantile(dist, chi2_outlier_thresh); + const double thresh = + internal::chi_squared_quantile(nlf_ptr->dim(), chi2_outlier_thresh); const double residual = grad_ptr->residual(current_est); if (residual > thresh) outlier_factors.insert(i); } @@ -199,17 +180,7 @@ std::set RISAM::getOutliers(double chi2_outlier_thresh) { return outlier_factors; } -/** - * ## ## ######## ## ######## ######## ######## ###### - * ## ## ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## - * ######### ###### ## ######## ###### ######## ###### - * ## ## ## ## ## ## ## ## ## - * ## ## ## ## ## ## ## ## ## ## - * ## ## ######## ######## ## ######## ## ## ###### - */ - -/*********************************************************************************************************************/ +/* ************************************************************************* */ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& 
new_factors, const gtsam::ISAM2UpdateParams& update_params) { // To match the behavior of iSAM2 and ensure matching indices we first add @@ -238,13 +209,13 @@ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, removed_factors); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, const gtsam::FactorIndices& new_factor_indices) { for (size_t i = 0; i < new_factors.nrFactors(); i++) { gtsam::FactorIndex fidx = new_factor_indices[i]; auto grad_factor = - boost::dynamic_pointer_cast(new_factors.at(i)); + std::dynamic_pointer_cast(new_factors.at(i)); // Get the pointers to mu and mu_init for this factor std::shared_ptr mu, mu_init; @@ -267,7 +238,7 @@ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, } } -/*********************************************************************************************************************/ +/* ************************************************************************* */ void RISAM::incrementMuInits() { gtsam::VectorValues delta = solver_->getDelta(); if (delta.norm() / delta.size() < @@ -275,16 +246,17 @@ void RISAM::incrementMuInits() { gtsam::Values theta = solver_->calculateEstimate(); for (auto fidx : factors_to_check_status_) { auto grad_factor = - boost::dynamic_pointer_cast(factors_[fidx]); - auto mahdist = grad_factor->residual(theta); - auto dist = - boost::math::chi_squared_distribution(factors_[fidx]->dim()); - if (mahdist > - boost::math::quantile(dist, params_.outlier_mu_chisq_upper_bound)) { + std::dynamic_pointer_cast(factors_[fidx]); + double mahdist = grad_factor->residual(theta); + const double mah_upper_bound = internal::chi_squared_quantile( + factors_[fidx]->dim(), params_.outlier_mu_chisq_upper_bound); + const double mah_lower_bound = 
internal::chi_squared_quantile( + factors_[fidx]->dim(), params_.outlier_mu_chisq_lower_bound); + + if (mahdist > mah_upper_bound) { *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInit(*(mu_inits_[fidx])); - } else if (mahdist < boost::math::quantile( - dist, params_.outlier_mu_chisq_lower_bound)) { + } else if (mahdist < mah_lower_bound) { *(mu_inits_[fidx]) = grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); } @@ -293,11 +265,11 @@ void RISAM::incrementMuInits() { } } -/*********************************************************************************************************************/ +/* ************************************************************************* */ std::set RISAM::convexifyInvolvedFactors( const gtsam::NonlinearFactorGraph& new_factors, const gtsam::Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, gtsam::ISAM2UpdateParams& update_params, UpdateResult& update_result) { // Gather all involved keys - Involved keys are directly induced by the new // factors @@ -322,14 +294,14 @@ std::set RISAM::convexifyInvolvedFactors( // Gather all affected keys - Super set of involved keys including keys // modified by user params auto [affected_keys, is_batch_update] = - solver_->preComputeUpdateInfo(new_factors, new_theta, update_params); + solver_->predictUpdateInfo(new_factors, new_theta, update_params); // Convexify update-involved factors std::set convex_factors; for (gtsam::Key affected_key : affected_keys) { for (gtsam::FactorIndex fidx : variable_index_[affected_key]) { auto grad_factor = - boost::dynamic_pointer_cast(factors_.at(fidx)); + std::dynamic_pointer_cast(factors_.at(fidx)); if (grad_factor) { bool inside = true; // indicates that all variables touched by this // factor are in the affected set @@ -360,4 +332,4 @@ std::set RISAM::convexifyInvolvedFactors( return convex_factors; } -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No 
newline at end of file diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index 5f5cbbd7f9..bbeffecf16 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -10,16 +10,14 @@ * -------------------------------------------------------------------------- */ /** - * @file RISAM.h - * @brief Robust Incremental Smoothing and Mapping (riSAM) - * @date October 2025 + * @file RISAM.h + * @brief Robust Incremental Smoothing and Mapping (riSAM) * @author Dan McGann + * @date October 2025 */ #pragma once #include - -#include "risam/DoglegLineSearch.h" -#include "risam/ExtendedISAM2.h" +#include namespace gtsam { @@ -36,8 +34,9 @@ class RISAM { /// are overriden in RISAM::RISAM() ISAM2Params isam2_params; /// @brief Optional params for custom incremental trust region methods - std::optional - optimization_params = DoglegLineSearch::Parameters(); + // std::optional + // optimization_params = DoglegLineSearch::Parameters(); + // TODO (dan) replace a method to configure DLLS /// @brief Flag to increment mu_init when we converge in values for outlier /// factors @@ -74,9 +73,8 @@ class RISAM { protected: /// @brief Configuration parameters for the riSAM algorithm Parameters params_; - /// @brief The encapsulated iSAM2 algorithm. We use ExtendedISAM2 to get some - /// additional functionality see ExtendedISAM2 for details - std::unique_ptr solver_; + /// @brief The encapsulated iSAM2 algorithm. + std::unique_ptr solver_; /// @brief The current control parameter values for all factors. If factor i /// is a GraduatedFactor mu_[i] is a reference to that factors internal state. /// If factor i is not Graduated mu_[i] is a pointer to a value of Zero @@ -127,8 +125,7 @@ class RISAM { */ // Construct the encapsulated solver using the modified parameters. - solver_ = std::make_unique(params_.isam2_params, - params_.optimization_params); + solver_ = std::make_unique(params_.isam2_params); } /// @brief Update Interface. 
See ISAM2 docs for details as parameters match @@ -139,12 +136,11 @@ class RISAM { UpdateResult update( const NonlinearFactorGraph& new_factors = NonlinearFactorGraph(), const Values& new_theta = Values(), - const boost::optional> extra_gnc_involved_keys = - boost::none, + const std::optional> extra_gnc_involved_keys = std::nullopt, const FactorIndices& remove_factor_indices = FactorIndices(), - const boost::optional>& constrained_keys = boost::none, - const boost::optional>& no_relin_keys = boost::none, - const boost::optional>& extra_reelim_keys = boost::none, + const std::optional>& constrained_keys = std::nullopt, + const std::optional>& no_relin_keys = std::nullopt, + const std::optional>& extra_reelim_keys = std::nullopt, bool force_relinearize = false); /// @brief Update Interface. See ISAM2 docs for details as parameters match @@ -153,7 +149,7 @@ class RISAM { /// robust update UpdateResult update( const NonlinearFactorGraph& new_factors, const Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, const ISAM2UpdateParams& update_params); /// @brief Returns the current estimate from the solver @@ -162,17 +158,6 @@ class RISAM { /// @brief Returns the underlying factors of the system NonlinearFactorGraph getFactorsUnsafe() { return factors_; } - /** @brief Permits manual modification of the solver's linearization point. - * This function can be used for forcefully set the linearization point used - * by the solver to a specific value. The variable that is modified should be - * marked for re-elimination during the next update to see effects. WARN: This - * function can have strange effects and should only be used by developers who - * know what they are doing. 
- * @param key: The variable for which to modify the linearization point - * @param value: The new linearization point - */ - void forceSetLinearization(const Key& key, const Value& value); - /** @brief Returns the set of measurements (identified by their factor index) * that have been deems outliers. riSAM defines a measurement as an outlier if * its current residual is greater than the chi2_threshold provided @@ -192,7 +177,7 @@ class RISAM { */ UpdateResult updateRobust( const NonlinearFactorGraph& new_factors, const Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, const ISAM2UpdateParams& update_params); /** @brief Update housekeeping information for riSAM this involved: @@ -233,7 +218,7 @@ class RISAM { */ std::set convexifyInvolvedFactors( const NonlinearFactorGraph& new_factors, const Values& new_theta, - const boost::optional> extra_gnc_involved_keys, + const std::optional> extra_gnc_involved_keys, ISAM2UpdateParams& internal_update_params, UpdateResult& update_result); }; diff --git a/gtsam/sam/RISAMGraduatedFactor.cpp b/gtsam/sam/RISAMGraduatedFactor.cpp index aef553906e..f9a97a010e 100644 --- a/gtsam/sam/RISAMGraduatedFactor.cpp +++ b/gtsam/sam/RISAMGraduatedFactor.cpp @@ -9,9 +9,9 @@ * -------------------------------------------------------------------------- */ -#include "risam/GraduatedFactor.h" +#include -namespace risam { +namespace gtsam { /* ************************************************************************* */ GraduatedFactor::GraduatedFactor(GraduatedKernel::shared_ptr kernel) : kernel_(kernel) { diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index 38dbe65c1d..dfd536d4b5 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -19,14 +19,14 @@ #pragma once #include -#include "risam/GraduatedKernel.h" -#include "risam/RISAM.h" +#include +#include -namespace risam { +namespace gtsam { class GraduatedFactor { 
/** TYPES **/ public: - typedef boost::shared_ptr shared_ptr; + typedef std::shared_ptr shared_ptr; /** FIELDS **/ protected: @@ -86,7 +86,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /** TYPES **/ public: - typedef boost::shared_ptr> shared_ptr; + typedef std::shared_ptr> shared_ptr; /**INTERFACE**/ public: @@ -103,7 +103,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /// @brief makes a deep copy gtsam::NonlinearFactor::shared_ptr clone() const override { - return boost::static_pointer_cast( + return std::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr( new GenericGraduatedFactor(*this))); } @@ -148,7 +148,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { for (size_t i = 0; i < Ablocks.size(); i++) { Ablock_map[keys[i]] = Ablocks[i]; } - return boost::make_shared(Ablock_map, b); + return std::make_shared(Ablock_map, b); } /* ************************************************************************* @@ -179,7 +179,7 @@ GenericGraduatedFactor make_graduated(Args&&... args) { template typename GenericGraduatedFactor::shared_ptr make_shared_graduated( Args&&... 
args) { - return boost::make_shared>( + return std::make_shared>( std::forward(args)...); } } // namespace risam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp index f901e2243d..9ba73b6d42 100644 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -1,17 +1,18 @@ + +#include + #include -#include "risam/GraduatedKernel.h" -#include "risam/Utilities.h" -namespace risam { +namespace gtsam { -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::error(const double& residual, const double& mu) const { double r2 = residual * residual; double c2 = params_.shape_param * params_.shape_param; return 0.5 * (c2 * r2) / (c2 + pow(r2, mu)); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::weight(const double& residual, const double& mu) const { double r2 = residual * residual; double c2 = params_.shape_param * params_.shape_param; @@ -19,7 +20,7 @@ double SIGKernel::weight(const double& residual, const double& mu) const { return (c2 * (c2 + pow(r2, mu) * (1 - mu))) / (sqrt_denom * sqrt_denom); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::updateMu(const double& mu, const double& residual, const size_t& update_count) const { if (params_.mu_update_strat == MuUpdateStrategy::MCGANN_2023) { @@ -39,11 +40,13 @@ double SIGKernel::updateMu(const double& mu, const double& residual, } if (update_count == 0 && residual > params_.shape_param / std::sqrt(3) && residual < 
(*params_.kang_residual_threshold)) { - double mu_inflection = bisectionMuSearch( - [residual, this](double new_mu) { - return secondDerivative(residual, new_mu); - }, - 0.5, 1.0, 1e-9); + // TODO (dan) remove + double mu_inflection = 0.5; + // bisectionMuSearch( + // [residual, this](double new_mu) { + // return secondDerivative(residual, new_mu); + // }, + // 0.5, 1.0, 1e-9); // If the inflection point is less than the current mu_init skip to // mu_final return mu_inflection <= mu ? 1.0 : mu_inflection; @@ -60,13 +63,15 @@ double SIGKernel::updateMu(const double& mu, const double& residual, // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for // the current residual if (update_count == 0 && residual > params_.shape_param / std::sqrt(3)) { - double mu_inflection = bisectionMuSearch( - [residual, this](double new_mu) { - return secondDerivative(residual, new_mu); - }, - 0.5, 1.0, 1e-9); - // If the inflection point is less than the current mu_init skip to - // mu_final + // TODO (dan) remove + double mu_inflection = 0.5; + // bisectionMuSearch( + // [residual, this](double new_mu) { + // return secondDerivative(residual, new_mu); + // }, + // 0.5, 1.0, 1e-9); + // If the inflection point is less than the current mu_init skip to + // mu_final return mu_inflection <= mu ? 
1.0 : mu_inflection; } else { // If update_count > 0 then we have already run the mu* update, so @@ -109,7 +114,7 @@ double SIGKernel::updateMu(const double& mu, const double& residual, } } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::incrementMuInit(const double& mu) const { if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { // Mu init update Empirically discovered and presented in the orig riSAM @@ -140,7 +145,7 @@ double SIGKernel::incrementMuInit(const double& mu) const { } } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::incrementMuInitInv(const double& mu) const { if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { // Mu init update Empirically discovered and presented in the orig riSAM @@ -171,16 +176,16 @@ double SIGKernel::incrementMuInitInv(const double& mu) const { } } -/*********************************************************************************************************************/ +/* ************************************************************************* */ bool SIGKernel::isMuConverged(const double& mu) const { return mu >= convergence_thresh_; } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh) { - const double outlier_residual_thresh = boost::math::quantile( - boost::math::chi_squared_distribution(dof), chi2_outlier_thresh); + double outlier_residual_thresh = + internal::chi_squared_quantile(dof, 
chi2_outlier_thresh); // Equation from taking derivative of SIGKernel setting equal to // influence_thresh and solving for mu=1 const double t1 = @@ -190,13 +195,12 @@ double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, return std::sqrt(-((t1 + t2) / t3)); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { - return boost::math::quantile( - boost::math::chi_squared_distribution(dof), chi2_threshold); + return internal::chi_squared_quantile(dof, chi2_threshold); } -/*********************************************************************************************************************/ +/* ************************************************************************* */ double SIGKernel::secondDerivative(const double& r, const double& mu) const { // Setup helpful values double c2 = params_.shape_param * params_.shape_param; @@ -212,4 +216,4 @@ double SIGKernel::secondDerivative(const double& r, const double& mu) const { return numerator / denominator; } -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index 1dd194b9fb..852b62fd25 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -15,15 +15,15 @@ #pragma once #include #include +#include -#include #include -namespace risam { +namespace gtsam { class GraduatedKernel { /** TYPES **/ public: - typedef boost::shared_ptr shared_ptr; + typedef std::shared_ptr shared_ptr; /** FIELDS **/ protected: @@ -100,7 +100,7 @@ class SIGKernel : public GraduatedKernel { /** TYPES **/ public: /// @brief Shortcut for shared pointer - typedef boost::shared_ptr shared_ptr; + typedef std::shared_ptr shared_ptr; /// @brief Strategies 
for updating Mu between GNC iterations of riSAM enum class MuUpdateStrategy { @@ -229,4 +229,4 @@ class SIGKernel : public GraduatedKernel { /// @brief Second derivative of the SIG Kernel wrt r double secondDerivative(const double& r, const double& mu) const; }; -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No newline at end of file From f5574abe58afa94f373471016c069c5da970a15f Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Wed, 24 Dec 2025 14:03:57 -0500 Subject: [PATCH 04/17] Update docs for new formatting --- gtsam/sam/RISAM.cpp | 56 ++++++++++++++++--------------- gtsam/sam/RISAM.h | 57 ++++++++++++++------------------ gtsam/sam/RISAMGraduatedFactor.h | 22 +++++------- gtsam/sam/RISAMGraduatedKernel.h | 4 --- 4 files changed, 63 insertions(+), 76 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index a3cc0868f3..e9dbebb9d5 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -46,8 +46,7 @@ RISAM::UpdateResult RISAM::update( const gtsam::Values& new_theta, const std::optional> extra_gnc_involved_keys, const gtsam::ISAM2UpdateParams& update_params) { - // Determine if the update includes any graduated factors (i.e. 
potential - // outliers) + // Determine if the update includes any graduated factors bool update_includes_potential_outliers = false; for (auto& factor : new_factors) { auto grad_factor = std::dynamic_pointer_cast(factor); @@ -58,7 +57,7 @@ RISAM::UpdateResult RISAM::update( updateHouseKeeping(new_factors, update_params); // Run the update: robust if we have potential outliers (or if requested) - // otherwise standard iSAM2 + // otherwise standard iSAM2 to improve efficiency UpdateResult result; if (extra_gnc_involved_keys || update_includes_potential_outliers) { result = updateRobust(new_factors, new_theta, extra_gnc_involved_keys, @@ -84,8 +83,8 @@ RISAM::UpdateResult RISAM::updateRobust( // Setup the result structure UpdateResult update_result; - // Create params for the initial update, note these may be modified by - // convexifyInvolvedFactors + // Create params for the initial update + // NOTE: These may be modified by convexifyInvolvedFactors gtsam::ISAM2UpdateParams initial_update_params = update_params; // Convexify involved factors @@ -106,9 +105,8 @@ RISAM::UpdateResult RISAM::updateRobust( // Run GNC iterations until all convex factors have converged w.r.t. 
mu_ gtsam::FactorIndices remaining_convex_factors(convex_factors.begin(), convex_factors.end()); - gtsam::ISAM2UpdateParams - update_params_internal; // Used to force updates convex factors + ensure - // better ordering + // Internal params force update convex factors + ensure better ordering + gtsam::ISAM2UpdateParams update_params_internal; while (remaining_convex_factors.size() > 0) { // Get the current solution gtsam::Values current_est = solver_->calculateEstimate(); @@ -125,12 +123,13 @@ RISAM::UpdateResult RISAM::updateRobust( mu_update_count[fidx]++; } + // Setup Internal Update Parameters + // Force update to convex factors + update_params_internal.extraReelimKeys = convexKeys; + // Prevent iSAM2 from ordering convex factors first + update_params_internal.constrainedKeys = gtsam::FastMap(); + // Run the Update, re-eliminating the subproblem defined at this time-step - update_params_internal.extraReelimKeys = - convexKeys; // Force update to convex factors - update_params_internal.constrainedKeys = - gtsam::FastMap(); // Prevent iSAM2 from ordering - // convex factors first solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), update_params_internal); solver_->calculateEstimate(); @@ -146,8 +145,8 @@ RISAM::UpdateResult RISAM::updateRobust( remaining_convex_factors = new_remaining_convex_factors; } + // Orig RISAM preformed 1 extra iteration for better convergence for (size_t i = 0; i < params_.number_extra_iters; i++) { - // Orig RISAM preformed 1 extra iteration solver_->update(gtsam::NonlinearFactorGraph(), gtsam::Values(), gtsam::ISAM2UpdateParams()); solver_->calculateEstimate(); @@ -194,8 +193,7 @@ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, // Add a mu and mu_init entry for each factor augmentMu(new_factors, new_factor_indices); - // Once we have added new factors we can remove any factors and corresponding - // info + // Once we add new factors we can remove any factors and corresponding info 
gtsam::NonlinearFactorGraph removed_factors; removed_factors.reserve(update_params.removeFactorIndices.size()); for (const auto fidx : update_params.removeFactorIndices) { @@ -240,13 +238,18 @@ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, /* ************************************************************************* */ void RISAM::incrementMuInits() { + // Compute Average Delta gtsam::VectorValues delta = solver_->getDelta(); - if (delta.norm() / delta.size() < - params_.outlier_mu_avg_var_convergence_thresh) { + bool is_sufficient_delta = delta.norm() / delta.size() < + params_.outlier_mu_avg_var_convergence_thresh; + + // Evaluate All Graduated factors if sufficient average delta + if (is_sufficient_delta) { gtsam::Values theta = solver_->calculateEstimate(); for (auto fidx : factors_to_check_status_) { auto grad_factor = std::dynamic_pointer_cast(factors_[fidx]); + double mahdist = grad_factor->residual(theta); const double mah_upper_bound = internal::chi_squared_quantile( factors_[fidx]->dim(), params_.outlier_mu_chisq_upper_bound); @@ -261,6 +264,7 @@ void RISAM::incrementMuInits() { grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); } } + // Reset accumulator factors_to_check_status_.clear(); } } @@ -271,8 +275,7 @@ std::set RISAM::convexifyInvolvedFactors( const gtsam::Values& new_theta, const std::optional> extra_gnc_involved_keys, gtsam::ISAM2UpdateParams& update_params, UpdateResult& update_result) { - // Gather all involved keys - Involved keys are directly induced by the new - // factors + // Gather all involved keys - Directly induced by the new factors gtsam::KeySet new_factor_keys = new_factors.keys(); gtsam::KeySet involved_keys = solver_->traverseTop(new_factors.keyVector()); involved_keys.insert(new_factor_keys.begin(), new_factor_keys.end()); @@ -283,8 +286,8 @@ std::set RISAM::convexifyInvolvedFactors( extra_gnc_involved_keys->end()); // For any user specified involved keys also add them to the extra-reelim if 
(!update_params.extraReelimKeys) { - update_params.extraReelimKeys = - gtsam::FastList(); // create if it does not exist + // Create container if it does not exist + update_params.extraReelimKeys = gtsam::FastList(); } update_params.extraReelimKeys->insert(update_params.extraReelimKeys->end(), extra_gnc_involved_keys->begin(), @@ -303,10 +306,11 @@ std::set RISAM::convexifyInvolvedFactors( auto grad_factor = std::dynamic_pointer_cast(factors_.at(fidx)); if (grad_factor) { - bool inside = true; // indicates that all variables touched by this - // factor are in the affected set - bool update_involved = false; // indicates a factor touches at least - // one variable in the involved set + // Indicates that all variables in this factor are in the affected set + bool inside = true; + // Indicates a factor touches at least one variable in the involved set + bool update_involved = false; + // Compute inside and update involved for (gtsam::Key factor_key : factors_.at(fidx)->keys()) { inside = inside && affected_keys.count(factor_key); update_involved = update_involved || involved_keys.count(factor_key); diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index bbeffecf16..ba3331417f 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -33,21 +33,15 @@ class RISAM { /// @brief The parameters for the encapsulated iSAM2 algorithm @Note some /// are overriden in RISAM::RISAM() ISAM2Params isam2_params; - /// @brief Optional params for custom incremental trust region methods - // std::optional - // optimization_params = DoglegLineSearch::Parameters(); - // TODO (dan) replace a method to configure DLLS - /// @brief Flag to increment mu_init when we converge in values for outlier - /// factors + /// @brief Flag to increment mu_init when the value estimate converges // See RISAM::IncrementMuInit for details bool increment_outlier_mu{true}; /// @brief Increment mu_init if chi^2 > upper bound double outlier_mu_chisq_upper_bound{0.95}; /// @brief Decrement mu_init if chi^2 < 
lower bound double outlier_mu_chisq_lower_bound{0.25}; - /// @brief Average variable delta.norm() threshold used to identify variable - /// convergence + /// @brief Average variable delta threshold to identify value convergence double outlier_mu_avg_var_convergence_thresh{0.01}; /// @brief The number of extra iterations to perform after mu convergence size_t number_extra_iters{1}; @@ -57,8 +51,8 @@ class RISAM { * See below for details about the information included */ struct UpdateResult { - /// @brief The iSAM2 result from the first internal update [Note riSAM may - /// run multiple internal updates] + /// @brief The iSAM2 result from the first internal update + /// NOTE: riSAM may run multiple internal ISAM2 updates ISAM2Result isam2_result; /// @brief The set variables directly involved in the update @@ -77,10 +71,10 @@ class RISAM { std::unique_ptr solver_; /// @brief The current control parameter values for all factors. If factor i /// is a GraduatedFactor mu_[i] is a reference to that factors internal state. - /// If factor i is not Graduated mu_[i] is a pointer to a value of Zero + /// If factor i is not Graduated mu_[i] is a pointer to a value of zero FastVector> mu_; /// @brief The current initial control parameter values for all factors. If - /// factor i is not Graduated mu_[i] is a pointer to a value of Zero + /// factor i is not Graduated mu_[i] is a pointer to a value of zero FastVector> mu_inits_; /// @brief The set of GraduatedFactors that have been convexified since the /// last mu_init_ increment These are the factors that we can increment mu's @@ -89,14 +83,14 @@ class RISAM { /** * RISAM maintains its own variable index and factors. These match exactly - * that of the underlying iSAM2 solver, but lead it since we update RISAM - * housekeeping before updating the underlying solver. + * that of the underlying iSAM2 solver, but lead ahead of it since we update + * RISAM housekeeping before updating the underlying solver. 
*/ - /// @brief VariableIndex for the underlying system matches that of solver_ - /// after every update + /// @brief VariableIndex for the underlying system + /// INVARIANT: matches that of solver_ after every update VariableIndex variable_index_; - /// @brief The Factors for the underlying system matches that of solver_ after - /// every update + /// @brief The Factors for the underlying system + /// INVARIANT: matches that of solver_ after every update NonlinearFactorGraph factors_; /** INTERFACE **/ @@ -105,15 +99,9 @@ class RISAM { * configuration. Note this constructor will override some values in * params.isam2_params. * @param params: The configuration parameters for this instance of riSAM - * */ RISAM(const Parameters& params) : params_(params) { - // Override user preferences for iSAM2 params - params_.isam2_params.relinearizeSkip = - 1; // We must have relinearization skip as 1 - params_.isam2_params.cacheLinearizedFactors = - false; // We must not use cached factors - /** Param override explanation + /** Override user preferences for iSAM2 params * To ensure that we update the current estimated theta_ after each * intermediate GNC iteration we must use relinearizeSkip = 1 so that we * allow relinearization at any step it is needed. @@ -123,6 +111,10 @@ class RISAM { * to these mu_ values we must turn off caching that could result in old mu_ * being used. */ + // We must have relinearization skip as 1 + params_.isam2_params.relinearizeSkip = 1; + // We must not use cached factors + params_.isam2_params.cacheLinearizedFactors = false; // Construct the encapsulated solver using the modified parameters. solver_ = std::make_unique(params_.isam2_params); @@ -132,7 +124,7 @@ class RISAM { /// (almost) exactly /// @param extra_gnc_involved_keys - overrides internal RISAM logic and /// performs a robust update. 
All extra involved keys will be treated as being - /// apart of the current update wrt factor convexification + /// a part of the current update with respect to factor convexification UpdateResult update( const NonlinearFactorGraph& new_factors = NonlinearFactorGraph(), const Values& new_theta = Values(), @@ -159,7 +151,7 @@ class RISAM { NonlinearFactorGraph getFactorsUnsafe() { return factors_; } /** @brief Returns the set of measurements (identified by their factor index) - * that have been deems outliers. riSAM defines a measurement as an outlier if + * that have been deemed outliers. riSAM defines a measurement as an outlier if * its current residual is greater than the chi2_threshold provided * @param chi2_outlier_thresh - The chi2 threshold used to define outliers * WARN: Potentially slow since we iterate over all factors @@ -180,11 +172,10 @@ class RISAM { const std::optional> extra_gnc_involved_keys, const ISAM2UpdateParams& update_params); - /** @brief Update housekeeping information for riSAM this involved: - * 1. Determining the indicies that each factor in new_factors will be - * inserted into - * 2. Update the riSAM fields: factors_ and variable_index_ (they will be - * ahead of those in solver_ until solver.update() is called) + /** @brief Update housekeeping information for riSAM this involves: + * 1. Determining the indicies for new_factors + * 2. Update the riSAM fields: factors_ and variable_index_ + * NOTE: they will be ahead of those in solver_ until solver.update() is called * 3. Update mu_ and mu_init_ for the new factors * @param new_factors: The new factors for the current update * @param update_params: The update parameters for the current update @@ -201,7 +192,7 @@ class RISAM { /** @brief Increments mu_inits_ for any factor recently convexified. * Used to mitigate the long-term affect of measurements that we are confident - * are outliers + * are outliers. 
*/ void incrementMuInits(); diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index dfd536d4b5..dda935252e 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -18,9 +18,8 @@ */ #pragma once #include - -#include #include +#include namespace gtsam { class GraduatedFactor { @@ -49,14 +48,11 @@ class GraduatedFactor { /// @brief Copy constructor GraduatedFactor(const GraduatedFactor& other); - /** @brief Linearize this factor at a specific point, using the specified - * convexification parameter mu - * @param current_estimate: the variable estimate at which to linearize the - * Factor - * @param mu: the current value of the convexification parameter for this - * factor + /** @brief Linearize this factor using the convexification parameter mu + * @param current_estimate: the estimate at which to linearize the factor + * @param mu: the current value of the convexification parameter */ - virtual gtsam::GaussianFactor::shared_ptr linearizeRobust( + virtual gtsam::GaussianFactor::shared_ptr linearizeGraduated( const gtsam::Values& current_estimate) const = 0; /// @brief returns the residual of the factor @@ -66,7 +62,7 @@ class GraduatedFactor { virtual double robustResidual( const gtsam::Values& current_estimate) const = 0; - /// @brief Returns the value of \mu_{init} for this graduated kernel + /// @brief Returns the kernel for this graduated factor const GraduatedKernel::shared_ptr kernel() const; /// @brief Updates the Kernel of this Graduated Factor @@ -110,7 +106,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { gtsam::GaussianFactor::shared_ptr linearize( const gtsam::Values& current_estimate) const override { - return linearizeRobust(current_estimate); + return linearizeGraduated(current_estimate); } /// @brief For graduated factors return 0.5 \rho(r)^2 @@ -119,7 +115,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { } /** GRADUATED 
INTERFACE **/ - gtsam::GaussianFactor::shared_ptr linearizeRobust( + gtsam::GaussianFactor::shared_ptr linearizeGraduated( const gtsam::Values& current_estimate) const override { double r = residual(current_estimate); @@ -182,4 +178,4 @@ typename GenericGraduatedFactor::shared_ptr make_shared_graduated( return std::make_shared>( std::forward(args)...); } -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index 852b62fd25..389a2ff589 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -4,10 +4,6 @@ * towards \mu_{final} * * - * Citations: - * [2] - W. Kang et al. "Efficient Graduated Non-Convexity for Pose Graph - * Optimization", International Conference on Control, Automation and Systems - * (ICCAS), 2024 * * @author Dan McGann * @date Mar 2022 From b674f01c52cac6020a2585a3ddf16d67403946c5 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Mon, 19 Jan 2026 17:28:51 -0500 Subject: [PATCH 05/17] Cleanup Grad Factor+Kernel and Test --- gtsam/sam/RISAM.h | 27 ++++- gtsam/sam/RISAMGraduatedFactor.cpp | 3 +- gtsam/sam/RISAMGraduatedFactor.h | 60 +++++----- gtsam/sam/RISAMGraduatedKernel.cpp | 177 ++++------------------------- gtsam/sam/RISAMGraduatedKernel.h | 113 ++++++------------ gtsam/sam/tests/testRISAM.cpp | 164 ++++++++++++++++++++++++++ 6 files changed, 270 insertions(+), 274 deletions(-) create mode 100644 gtsam/sam/tests/testRISAM.cpp diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index ba3331417f..f7d24ce3d9 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -18,6 +18,8 @@ #pragma once #include #include +#include +#include namespace gtsam { @@ -151,8 +153,8 @@ class RISAM { NonlinearFactorGraph getFactorsUnsafe() { return factors_; } /** @brief Returns the set of measurements (identified by their factor index) - * that have been deemed outliers. 
riSAM defines a measurement as an outlier if - * its current residual is greater than the chi2_threshold provided + * that have been deemed outliers. riSAM defines a measurement as an outlier + * if its current residual is greater than the chi2_threshold provided * @param chi2_outlier_thresh - The chi2 threshold used to define outliers * WARN: Potentially slow since we iterate over all factors * @returns The set of factors that are considered outliers by the RISAM @@ -174,8 +176,9 @@ class RISAM { /** @brief Update housekeeping information for riSAM this involves: * 1. Determining the indicies for new_factors - * 2. Update the riSAM fields: factors_ and variable_index_ - * NOTE: they will be ahead of those in solver_ until solver.update() is called + * 2. Update the riSAM fields: factors_ and variable_index_ + * NOTE: they will be ahead of those in solver_ until solver.update() is + * called * 3. Update mu_ and mu_init_ for the new factors * @param new_factors: The new factors for the current update * @param update_params: The update parameters for the current update @@ -211,6 +214,22 @@ class RISAM { const NonlinearFactorGraph& new_factors, const Values& new_theta, const std::optional> extra_gnc_involved_keys, ISAM2UpdateParams& internal_update_params, UpdateResult& update_result); + + public: + /** STATIC HELPERS **/ + /** @brief Constructs a shared_ptr of FACTOR_TYPE graduated factor for riSAM + * This identifies a factor as a potential outlier measurement. + * @param kernel: The graduated kernel to use for this factor + * @param args: The arguments required to construct the FACTOR_TYPE + */ + template + static + typename GenericGraduatedFactor::shared_ptr + make_graduated(Args&&... 
args) { + return std::make_shared< + GenericGraduatedFactor>( + std::forward(args)...); + } }; } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.cpp b/gtsam/sam/RISAMGraduatedFactor.cpp index f9a97a010e..9496872835 100644 --- a/gtsam/sam/RISAMGraduatedFactor.cpp +++ b/gtsam/sam/RISAMGraduatedFactor.cpp @@ -35,5 +35,4 @@ void GraduatedFactor::updateKernel( kernel_ = new_kernel; *mu_ = new_kernel->muInit(); } - -} // namespace risam \ No newline at end of file +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index dda935252e..f42ef6d3a0 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -18,10 +18,11 @@ */ #pragma once #include -#include #include namespace gtsam { + +/// @brief Graduated Factor for riSAM base class class GraduatedFactor { /** TYPES **/ public: @@ -34,7 +35,7 @@ class GraduatedFactor { /// @brief The unique mu control parameter for this factor std::shared_ptr mu_; - /// befriend RISAM to give access to these protected values + /// Befriend RISAM to give access to these protected values friend class RISAM; /**INTERFACE**/ @@ -74,6 +75,7 @@ class GraduatedFactor { virtual gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const = 0; }; +/// @brief Instantiation of Graduated Factor wrapping any Nonlinear Factor template class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { static_assert(std::is_base_of::value, @@ -87,38 +89,30 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /**INTERFACE**/ public: /** @brief Constructor + * @param kernel: The graduated kernel to use for this factor * @param args: The arguments required to construct the FACTOR_TYPE */ template GenericGraduatedFactor(GraduatedKernel::shared_ptr kernel, Args&&... 
args) : FACTOR_TYPE(std::forward(args)...), GraduatedFactor(kernel) {} - /// @brief Copy constructor + /// @brief Copy Constructor GenericGraduatedFactor(const GenericGraduatedFactor& other) : FACTOR_TYPE(other), GraduatedFactor(other.kernel_) {} - /// @brief makes a deep copy + /// @brief Makes a deep copy of the factor gtsam::NonlinearFactor::shared_ptr clone() const override { return std::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr( new GenericGraduatedFactor(*this))); } - gtsam::GaussianFactor::shared_ptr linearize( - const gtsam::Values& current_estimate) const override { - return linearizeGraduated(current_estimate); - } - - /// @brief For graduated factors return 0.5 \rho(r)^2 - double error(const gtsam::Values& values) const override { - return 0.5 * std::pow(robustResidual(values), 2); - } - - /** GRADUATED INTERFACE **/ + /// @brief Linearizes the factor using the current value of mu gtsam::GaussianFactor::shared_ptr linearizeGraduated( const gtsam::Values& current_estimate) const override { double r = residual(current_estimate); + // Use base factor to linearize gtsam::Matrix A; gtsam::Vector b; auto whitened_linear_system = FACTOR_TYPE::linearize(current_estimate); @@ -147,35 +141,33 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { return std::make_shared(Ablock_map, b); } - /* ************************************************************************* - */ + /// @brief Linearize the System @see NonlinearFactor::linearize + GaussianFactor::shared_ptr linearize( + const gtsam::Values& current_estimate) const override { + // Delegate to linearizeGraduated which is required by the GraduatedFactor + // Interface + return linearizeGraduated(current_estimate); + } + + /// @brief Returns 0.5 \rho(r)^2 + double error(const gtsam::Values& values) const override { + return 0.5 * std::pow(robustResidual(values), 2); + } + + /** GRADUATED INTERFACE **/ + /// @brief See GraduatedFactor::residual double residual(const 
gtsam::Values& current_estimate) const override { return sqrt(2.0 * FACTOR_TYPE::error(current_estimate)); } - /* ************************************************************************* - */ + /// @brief See GraduatedFactor::robustResidual double robustResidual(const gtsam::Values& current_estimate) const override { return kernel_->error(residual(current_estimate), *mu_); } - /* ************************************************************************* - */ + /// @brief See GraduatedFactor::cloneUngraduated gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const override { return FACTOR_TYPE::clone(); } }; - -/** HELPERS **/ -template -GenericGraduatedFactor make_graduated(Args&&... args) { - return GenericGraduatedFactor(std::forward(args)...); -} - -template -typename GenericGraduatedFactor::shared_ptr make_shared_graduated( - Args&&... args) { - return std::make_shared>( - std::forward(args)...); -} } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp index 9ba73b6d42..fa4fe8406f 100644 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -8,14 +8,14 @@ namespace gtsam { /* ************************************************************************* */ double SIGKernel::error(const double& residual, const double& mu) const { double r2 = residual * residual; - double c2 = params_.shape_param * params_.shape_param; + double c2 = this->shape_param * this->shape_param; return 0.5 * (c2 * r2) / (c2 + pow(r2, mu)); } /* ************************************************************************* */ double SIGKernel::weight(const double& residual, const double& mu) const { double r2 = residual * residual; - double c2 = params_.shape_param * params_.shape_param; + double c2 = this->shape_param * this->shape_param; double sqrt_denom = c2 + pow(r2, mu); return (c2 * (c2 + pow(r2, mu) * (1 - mu))) / (sqrt_denom * sqrt_denom); } @@ -23,157 +23,17 @@ double 
SIGKernel::weight(const double& residual, const double& mu) const { /* ************************************************************************* */ double SIGKernel::updateMu(const double& mu, const double& residual, const size_t& update_count) const { - if (params_.mu_update_strat == MuUpdateStrategy::MCGANN_2023) { - // Mu update empirically discovered and presented in the orig riSAM paper - return std::min(1.0, mu + (mu + 0.1) * 1.2); - } else if (params_.mu_update_strat == MuUpdateStrategy::SIMPLE_HUBER) { - // Simple mu update of [mu_init, 0.5, 1.0] where 0.5 results in - // approximately the huber kernel - return mu < 0.5 ? 0.5 : 1.0; - } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_CODE_2023) { - // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for - // the current residual - if (!params_.kang_residual_threshold) { - throw std::runtime_error( - "mu_update_strat == KANG_CODE_2023 but no kang_residual_threshold " - "provided in params"); - } - if (update_count == 0 && residual > params_.shape_param / std::sqrt(3) && - residual < (*params_.kang_residual_threshold)) { - // TODO (dan) remove - double mu_inflection = 0.5; - // bisectionMuSearch( - // [residual, this](double new_mu) { - // return secondDerivative(residual, new_mu); - // }, - // 0.5, 1.0, 1e-9); - // If the inflection point is less than the current mu_init skip to - // mu_final - return mu_inflection <= mu ? 1.0 : mu_inflection; - } else { - // If update_count > 0 then we have already run the mu* update, so - // return 1.0 If r < c/sqrt(3) then the result will be > 1.0 so skip - // search and return 1.0 If r > kang_residual_threshold after the convex - // iter then this measurement is most def an outlier so go to 1.0 Note: - // This final condition is present in the code, but not in the paper by - // Kang et al. 
- return 1.0; - } - } else if (params_.mu_update_strat == MuUpdateStrategy::KANG_PAPER_2023) { - // 3 Step process [mu_init, mu*, 1] where mu* is the inflection point for - // the current residual - if (update_count == 0 && residual > params_.shape_param / std::sqrt(3)) { - // TODO (dan) remove - double mu_inflection = 0.5; - // bisectionMuSearch( - // [residual, this](double new_mu) { - // return secondDerivative(residual, new_mu); - // }, - // 0.5, 1.0, 1e-9); - // If the inflection point is less than the current mu_init skip to - // mu_final - return mu_inflection <= mu ? 1.0 : mu_inflection; - } else { - // If update_count > 0 then we have already run the mu* update, so - // return 1.0 If r < c/sqrt(3) then the result will be > 1.0 so skip - // search and return 1.0 - return 1.0; - } - } else if (params_.mu_update_strat == MuUpdateStrategy::ONESTEP) { - // return mu_final after running with mu_init - return 1.0; - } else if (params_.mu_update_strat == MuUpdateStrategy::HALVES) { - // Arithmetic Series: mu[t+1] = mu[t] + (1/2) - return std::min(1.0, mu + 0.5); - } else if (params_.mu_update_strat == MuUpdateStrategy::THIRDS) { - // Arithmetic Series: mu[t+1] = mu[t] + (1/3) - return std::min(1.0, mu + (1.0 / 3.0)); - } else if (params_.mu_update_strat == MuUpdateStrategy::FOURTHS) { - // Arithmetic Series: mu[t+1] = mu[t] + (1/4) - return std::min(1.0, mu + 0.25); - } else if (params_.mu_update_strat == MuUpdateStrategy::ASYM_FOURTHS) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0) else mu[t] - // + 0.25 - if (update_count == 0) return std::min(1.0, mu + 0.5); - return std::min(1.0, mu + 0.25); - } else if (params_.mu_update_strat == MuUpdateStrategy::NONCONVEX) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if (t==0) else mu[t] - // + 0.2 - if (update_count == 0) return std::min(1.0, mu + 0.7); - return std::min(1.0, mu + 0.2); - } else if (params_.mu_update_strat == MuUpdateStrategy::STABLE) { - // Conditional Arithmetic 
Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + - // 0.4 if (t==1), else mu[t] + 0.05 My attempt to build the most stable - // method possible given lessons learned from multi-robot conditions - if (update_count == 0) return std::min(1.0, mu + 0.5); - if (update_count == 1) return std::min(1.0, mu + 0.4); - return std::min(1.0, mu + 0.05); - } else { - throw std::runtime_error( - "updateMu: requested MuUpdateStrategy is not implemented."); - } + return this->mu_update_strat(mu, residual, update_count); } /* ************************************************************************* */ double SIGKernel::incrementMuInit(const double& mu) const { - if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { - // Mu init update Empirically discovered and presented in the orig riSAM - // paper [Non-Symmetric] - return std::min(1.0, mu + (mu + 0.1) * 1.2); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { - // Simple update with 3 steps from init to final - return std::min(1.0, mu + (1.0 / 3.0)); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_4) { - // Simple update with 4 steps from init to final - return std::min(1.0, mu + 0.25); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_5) { - // Simple update with 5 steps from init to final - return std::min(1.0, mu + 0.2); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_10) { - // Simple update with 10 steps from init to final - return std::min(1.0, mu + 0.1); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_20) { - // Simple update with 20 steps from init to final - return std::min(1.0, mu + 0.05); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_50) { - // Simple update with 50 steps from init to final - return std::min(1.0, mu + 0.02); - } else { - throw std::runtime_error( - "incrementMuInit: requested MuInitIncrementStrategy is not " - "implemented."); - } + 
return std::min(1.0, mu + this->mu_init_increment); } /* ************************************************************************* */ double SIGKernel::incrementMuInitInv(const double& mu) const { - if (params_.mu_init_inc_strat == MuInitIncrementStrategy::MCGANN_2023) { - // Mu init update Empirically discovered and presented in the orig riSAM - // paper [Non-Symmetric] - return std::max(0.0, mu - 0.1); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_3) { - // Simple Update with 3 steps from init to final - return std::max(0.0, mu - (1.0 / 3.0)); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_4) { - // Simple Update with 4 steps from init to final - return std::max(0.0, mu - (1.0 / 4.0)); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_5) { - // Simple update with 5 steps from init to final - return std::max(0.0, mu - 0.2); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_10) { - // Simple update with 10 steps from init to final - return std::max(0.0, mu - 0.1); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_20) { - // Simple update with 20 steps from init to final - return std::max(0.0, mu - 0.05); - } else if (params_.mu_init_inc_strat == MuInitIncrementStrategy::EQUAL_50) { - // Simple update with 50 steps from init to final - return std::max(0.0, mu - 0.02); - } else { - throw std::runtime_error( - "incrementMuInitInv: requested MuInitIncrementStrategy is not " - "implemented."); - } + return std::max(0.0, mu - this->mu_init_increment); } /* ************************************************************************* */ @@ -201,19 +61,20 @@ double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { } /* ************************************************************************* */ -double SIGKernel::secondDerivative(const double& r, const double& mu) const { - // Setup helpful values - double c2 = params_.shape_param * 
params_.shape_param; - double c4 = c2 * c2; - double c6 = c4 * c2; - - // Calculate - double numerator = - c6 - // Constant - (c4 * (mu + 2) * (2 * mu - 1) * std::pow(r, 2 * mu)) + // term 1 - (c2 * (mu - 1) * (2 * mu - 1) * std::pow(r, 4 * mu)); // term 2 - double denominator = std::pow(c2 + std::pow(r, 2 * mu), 3); +double SIGKernel::muUpdateMcGann2023(const double& mu, const double& residual, + const size_t& update_count) { + return std::min(1.0, mu + (mu + 0.1) * 1.2); +} - return numerator / denominator; +/* ************************************************************************* */ +double SIGKernel::muUpdateStable(const double& mu, const double& residual, + const size_t& update_count) { + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + + // 0.4 if (t==1), else mu[t] + 0.05 + // Authors attempt to build the most stable method possible from years of + // using the algorithm. + if (update_count == 0) return std::min(1.0, mu + 0.5); + if (update_count == 1) return std::min(1.0, mu + 0.4); + return std::min(1.0, mu + 0.05); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index 389a2ff589..557a9f00da 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -16,6 +16,10 @@ #include namespace gtsam { + +/** @brief Base class for graduated kernels for riSAM + * Advanced users can write their own kernels by inheriting from this class + */ class GraduatedKernel { /** TYPES **/ public: @@ -90,92 +94,42 @@ class GraduatedKernel { }; /* ************************************************************************* */ +/** @brief Implementation of the Scale Invariant Graduated Kernel + * Based on the Geman-McClure Kernel this kernel has the mathematical form: + * + * \rho(r) = 0.5 * \frac{c^2 * r^2}{c^2 + (r^2)^\mu} + * + * where c is the shape parameter and r is the residual of the factor + */ class SIGKernel : public GraduatedKernel { 
- /** My version of the Graduated version of the Geman-McClure kernel */ - /** TYPES **/ public: /// @brief Shortcut for shared pointer typedef std::shared_ptr shared_ptr; - - /// @brief Strategies for updating Mu between GNC iterations of riSAM - enum class MuUpdateStrategy { - MCGANN_2023, // Geometric Series: mu[t+1] = mu[t] + (mu[t] + 0.1) * 1.2 - SIMPLE_HUBER, // Fixed Pattern [mu_init, 0.5, 1.0] - KANG_CODE_2023, // Sequence: [mu_init, mu*, 1.0] with early outlier - // rejection for r > kang_residual_threshold [2] - KANG_PAPER_2023, // Sequence: [mu_init, mu*, 1.0] (no early outlier - // rejection was presented in paper) [2] - ONESTEP, // Jump to mu=1.0 after initial convex step - HALVES, // Arithmetic Series: mu[t+1] = mu[t] + (1/2) - THIRDS, // Arithmetic Series: mu[t+1] = mu[t] + (1/3) - FOURTHS, // Arithmetic Series: mu[t+1] = mu[t] + (1/4) - ASYM_FOURTHS, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if - // (t==0) else mu[t] + 0.25 - NONCONVEX, // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.7 if - // (t==0) else mu[t] + 0.2 - STABLE // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), - // mu[t] + 0.4 if (t==1), else mu[t] + 0.05 - }; - /// @brief Strategies for update Mu_init after convergence - enum class MuInitIncrementStrategy { - MCGANN_2023, - EQUAL_3, - EQUAL_4, - EQUAL_5, - EQUAL_10, - EQUAL_20, - EQUAL_50 - }; - - struct Parameters { - /// @brief The shape parameter of the GM kernel. 
- double shape_param{1}; - /// @brief The update strategy to use for mu updates - MuUpdateStrategy mu_update_strat{MuUpdateStrategy::MCGANN_2023}; - /// @brief The update strategy to use for mu_init increments - MuInitIncrementStrategy mu_init_inc_strat{ - MuInitIncrementStrategy::MCGANN_2023}; - - /// @brief The residual threshold use to skip to mu=1.0 in the provided for - /// [2] Required only if mu_update_strat == KANG_CODE_2023 - std::optional kang_residual_threshold{std::nullopt}; - - /// @brief Default Constructor - explicit Parameters() = default; - /// @brief Parameterized Constructor - Parameters(double shape_param, MuUpdateStrategy mu_update_strat, - MuInitIncrementStrategy mu_init_inc_strat, - std::optional kang_residual_threshold = std::nullopt) - : shape_param(shape_param), - mu_update_strat(mu_update_strat), - mu_init_inc_strat(mu_init_inc_strat), - kang_residual_threshold(kang_residual_threshold) {} - /// @brief Static Constructor to work around GCC Bug - static Parameters Default() { return Parameters(); }; - }; + /// @brief Function type for mu update sequence + typedef std::function MuUpdateStrategy; /** FIELDS **/ public: - const Parameters params_; - - /** @brief Params Constructor - * @param params: The parameters to configure the kernel - */ - SIGKernel(const Parameters params = Parameters::Default()) - : GraduatedKernel(0.0, 1.0), params_(params) {}; + /// @brief The shape parameter of the SIG kernel 'c' + double shape_param; + /// @brief The update strategy for the sequence mu_ values + MuUpdateStrategy mu_update_strat; + /// @brief The amount to increment/decrement mu init if the factor is a strong + /// inlier/outlier when values converge + double mu_init_increment; /** @brief Individual Parameter Constructor * @param shape_param: The shape parameter for the kernel * @param mu_update_strat: The update strategy to use for mu updates - * @param mu_init_inc_strat: The update strategy to use for mu_init increments + * @param 
mu_init_increment: The amount to increment/decrement mu init */ - SIGKernel(double shape_param, MuUpdateStrategy mu_update_strat, - MuInitIncrementStrategy mu_init_inc_strat, - std::optional kang_residual_threshold = std::nullopt) + SIGKernel(double shape_param, MuUpdateStrategy mu_update_strat=muUpdateStable, + double mu_init_increment=0.2) : GraduatedKernel(0.0, 1.0), - params_(Parameters(shape_param, mu_update_strat, mu_init_inc_strat, - kang_residual_threshold)) {}; + shape_param(shape_param), + mu_update_strat(mu_update_strat), + mu_init_increment(mu_init_increment) {} /** Interface **/ public: @@ -193,6 +147,8 @@ class SIGKernel : public GraduatedKernel { /// @brief @see GraduatedKernel bool isMuConverged(const double& mu) const override; + /** STATIC HELPERS **/ + public: /** @brief Computes a shape param based on an an influence threshold for * outliers Computes a shape param such that an outlier (any measurement with * residual equal to or greater than the chi2_outlier_threshold) will have an @@ -220,9 +176,14 @@ class SIGKernel : public GraduatedKernel { */ static double shapeParamFromChi2(size_t dof, double chi2_threshold); - /** HELPERS **/ - protected: - /// @brief Second derivative of the SIG Kernel wrt r - double secondDerivative(const double& r, const double& mu) const; + /** Default Mu Update Strategies **/ + /// @brief Mu Update sequence empirically discovered and presented in the orig + /// riSAM paper + static double muUpdateMcGann2023(const double& mu, const double& residual, + const size_t& update_count); + /// @brief More stable Mu Update sequence developed empirically since + /// algorithm was published + static double muUpdateStable(const double& mu, const double& residual, + const size_t& update_count); }; } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/tests/testRISAM.cpp b/gtsam/sam/tests/testRISAM.cpp new file mode 100644 index 0000000000..9c5f0e3eca --- /dev/null +++ b/gtsam/sam/tests/testRISAM.cpp @@ -0,0 
+1,164 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testRISAM.cpp + * @brief Unit tests for RISAM and supporting classes + * @author Dan McGann + * @date January 2026 + */ + +#include +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, error) { + SIGKernel kernel(1.0); + // At zero error is not dependent on mu + CHECK(assert_equal(0.0, kernel.error(0.0, 0.0), 1e-9)); + CHECK(assert_equal(0.0, kernel.error(0.0, 0.5), 1e-9)); + CHECK(assert_equal(0.0, kernel.error(0.0, 1.0), 1e-9)); + + // For Mu = 0.0 error is quadratic + CHECK(assert_equal(0.0025, kernel.error(0.1, 0.0), 1e-9)); + CHECK(assert_equal(0.4225, kernel.error(1.3, 0.0), 1e-9)); + CHECK(assert_equal(38.750625, kernel.error(12.45, 0.0), 1e-9)); + + // For 0.0 < Mu Error depends on shape + CHECK(assert_equal(0.00454545454, kernel.error(0.1, 0.5), 1e-9)); + CHECK(assert_equal(0.36739130434, kernel.error(1.3, 0.5), 1e-9)); + CHECK(assert_equal(5.76217472119, kernel.error(12.45, 0.5), 1e-9)); + + // For Mu == 1.0 error is the Geman-McClure error + CHECK(assert_equal(0.00495049504, kernel.error(0.1, 1.0), 1e-9)); + CHECK(assert_equal(0.31412639405, kernel.error(1.3, 1.0), 1e-9)); + CHECK(assert_equal(0.49679492315, kernel.error(12.45, 1.0), 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, weight) { + SIGKernel kernel(1.0); + // At Mu = 0 weights are identical at 0.5 + CHECK(assert_equal(0.5, kernel.weight(0.1, 0.0), 1e-9)); + 
CHECK(assert_equal(0.5, kernel.weight(1.3, 0.0), 1e-9)); + CHECK(assert_equal(0.5, kernel.weight(12.45, 0.0), 1e-9)); + + // At Mu = 1 weights are higher for low error + CHECK(assert_equal(0.9802960494, kernel.weight(0.1, 1.0), 1e-9)); + CHECK(assert_equal(0.13819598955, kernel.weight(1.3, 1.0), 1e-9)); + CHECK(assert_equal(0.00004109007, kernel.weight(12.45, 1.0), 1e-9)); + + // At Mu = 1 large residuals have ~0 weight + CHECK(assert_equal(0.0, kernel.weight(2000.0, 1.0), 1e-9)); + + // Across Mu residual of 0 will have weight = 1 + CHECK(assert_equal(1.0, kernel.weight(0.0, 0.1), 1e-9)); + CHECK(assert_equal(1.0, kernel.weight(0.0, 0.5), 1e-9)); + CHECK(assert_equal(1.0, kernel.weight(0.0, 0.8), 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, weightSystem) { + SIGKernel kernel(1.0); + vector A; + A.push_back(Matrix::Identity(3, 3)); + Vector b = Vector::Ones(3); + + kernel.weightSystem(A, b, 0.1, 0.0); + CHECK(assert_equal(Matrix::Identity(3, 3) * sqrt(0.5), A[0], 1e-9)); + CHECK(assert_equal(Vector::Ones(3) * sqrt(0.5), b, 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, muInitInc) { + SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); + CHECK(assert_equal(0.1, kernel.incrementMuInit(0.0), 1e-9)); + CHECK(assert_equal(1.0, kernel.incrementMuInit(1.0), 1e-9)); + + CHECK(assert_equal(0.0, kernel.incrementMuInitInv(0.0), 1e-9)); + CHECK(assert_equal(0.9, kernel.incrementMuInitInv(1.0), 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, isMuConverged) { + SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); + CHECK(!kernel.isMuConverged(0.5)); + CHECK(kernel.isMuConverged(1.0)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, muUpdateMcGann2023) { + // Standard Sequence + 
CHECK(assert_equal(0.12, SIGKernel::muUpdateMcGann2023(0.0, 0.0, 0), 1e-9)); + CHECK(assert_equal(0.384, SIGKernel::muUpdateMcGann2023(0.12, 0.0, 1), 1e-9)); + CHECK( + assert_equal(0.9648, SIGKernel::muUpdateMcGann2023(0.384, 0.0, 2), 1e-9)); + CHECK(assert_equal(1.0, SIGKernel::muUpdateMcGann2023(0.9648, 0.0, 3), 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedKernel, muUpdateStable) { + // Standard Sequence + CHECK(assert_equal(0.5, SIGKernel::muUpdateStable(0.0, 0.0, 0), 1e-9)); + CHECK(assert_equal(0.9, SIGKernel::muUpdateStable(0.5, 0.0, 1), 1e-9)); + CHECK(assert_equal(0.95, SIGKernel::muUpdateStable(0.9, 0.0, 2), 1e-9)); + CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.95, 0.0, 3), 1e-9)); + + // Sequence with Non-Zero mu_init + CHECK(assert_equal(0.7, SIGKernel::muUpdateStable(0.2, 0.0, 0), 1e-9)); + CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.7, 0.0, 1), 1e-9)); + + // Sequence with Large Non-Zero mu_init + CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.7, 0.0, 0), 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedFactor, linearize) { + SIGKernel::shared_ptr kernel = std::make_shared(1.0); + NonlinearFactor::shared_ptr factor = + RISAM::make_graduated>(kernel, 0, 0.0, + noiseModel::Unit::Create(1)); + gtsam::Values values; + + // Linearize at Weight = 0.5 + values.insert(0, 0.0); + GaussianFactor::shared_ptr lin_factor = factor->linearize(values); + auto [A, b] = lin_factor->jacobian(); + CHECK(assert_equal(Matrix::Identity(1, 1) * sqrt(0.5), A, 1e-9)); + CHECK(assert_equal(Vector::Zero(1, 1), b, 1e-9)); +} + +/* ************************************************************************* */ +TEST(RISAMGraduatedFactor, error) { + SIGKernel::shared_ptr kernel = std::make_shared(1.0); + NonlinearFactor::shared_ptr factor = + RISAM::make_graduated>(kernel, 0, 0.0, + noiseModel::Unit::Create(1)); + gtsam::Values 
values; + values.insert(0, 1.0); + CHECK(assert_equal(factor->error(values), 0.03125, 1e-9)); + + GraduatedFactor::shared_ptr grad_factor = + std::dynamic_pointer_cast(factor); + CHECK(assert_equal(grad_factor->residual(values), 1.0, 1e-9)); + CHECK(assert_equal(grad_factor->robustResidual(values), 0.25, 1e-9)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ From 64227f887796a9cc0c0c4760f6ac0078a25f7b16 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sat, 31 Jan 2026 14:27:11 -0500 Subject: [PATCH 06/17] Add riSAM test --- gtsam/sam/tests/testRISAM.cpp | 148 ++++++++++++++++++++++++++++++++-- 1 file changed, 142 insertions(+), 6 deletions(-) diff --git a/gtsam/sam/tests/testRISAM.cpp b/gtsam/sam/tests/testRISAM.cpp index 9c5f0e3eca..159b36b0f4 100644 --- a/gtsam/sam/tests/testRISAM.cpp +++ b/gtsam/sam/tests/testRISAM.cpp @@ -17,8 +17,15 @@ */ #include +#include +#include +#include +#include #include +#include +#include #include +#include using namespace std; using namespace gtsam; @@ -101,11 +108,10 @@ TEST(RISAMGraduatedKernel, isMuConverged) { /* ************************************************************************* */ TEST(RISAMGraduatedKernel, muUpdateMcGann2023) { // Standard Sequence - CHECK(assert_equal(0.12, SIGKernel::muUpdateMcGann2023(0.0, 0.0, 0), 1e-9)); - CHECK(assert_equal(0.384, SIGKernel::muUpdateMcGann2023(0.12, 0.0, 1), 1e-9)); - CHECK( - assert_equal(0.9648, SIGKernel::muUpdateMcGann2023(0.384, 0.0, 2), 1e-9)); - CHECK(assert_equal(1.0, SIGKernel::muUpdateMcGann2023(0.9648, 0.0, 3), 1e-9)); + CHECK(assert_equal(0.12, SIGKernel::muUpdateMcGann2023(0.0, 0, 0), 1e-9)); + CHECK(assert_equal(0.384, SIGKernel::muUpdateMcGann2023(0.12, 0, 1), 1e-9)); + CHECK(assert_equal(0.9648, SIGKernel::muUpdateMcGann2023(0.384, 0, 2), 1e-9)); + CHECK(assert_equal(1.0, 
SIGKernel::muUpdateMcGann2023(0.9648, 0, 3), 1e-9)); } /* ************************************************************************* */ @@ -157,8 +163,138 @@ TEST(RISAMGraduatedFactor, error) { } /* ************************************************************************* */ +TEST(RISAM, GaussianRISAMNoOutliers) { + SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas( + (Vector(3) << 0.1, 0.1, M_PI / 100.0).finished()); + SharedDiagonal brNoise = + noiseModel::Diagonal::Sigmas((Vector(2) << M_PI / 100.0, 0.1).finished()); + + // These variables will be reused and accumulate factors and values + SIGKernel::shared_ptr kernel = std::make_shared( + SIGKernel::shapeParamFromInfThresh(0.1, 3, 0.95)); + RISAM::Parameters params; + ISAM2DoglegParams dlparams; + dlparams.verbose = true; + dlparams.initialDelta = 0.01; + params.isam2_params = ISAM2Params(dlparams); // ISAM2DoglegLineSearchParams(0.02, + // 1.0, 1.5, 1e-3, 1e-4, true)); + ISAM2 risam(params.isam2_params); + Values fullinit; + NonlinearFactorGraph fullgraph; + + // i keeps track of the time step + size_t i = 0; + + // Add a prior at time 0 and update risam + { + NonlinearFactorGraph newfactors; + newfactors.addPrior(0, Pose2(0.0, 0.0, 0.0), odoNoise); + fullgraph.push_back(newfactors); + + Values init; + init.insert((0), Pose2(0.01, 0.01, 0.01)); + fullinit.insert((0), Pose2(0.01, 0.01, 0.01)); + + risam.update(newfactors, init); + } + + // Add odometry from time 0 to time 5 + for (; i < 5; ++i) { + NonlinearFactorGraph newfactors; + newfactors.emplace_shared>( + i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); + fullgraph.push_back(newfactors); + + Values init; + init.insert((i + 1), Pose2(double(i + 1) + 0.1, -0.1, 0.01)); + fullinit.insert((i + 1), Pose2(double(i + 1) + 0.1, -0.1, 0.01)); + + risam.update(newfactors, init); + } + + // Add odometry from time 5 to 6 and landmark measurement at time 5 + { + NonlinearFactorGraph newfactors; + newfactors.emplace_shared>( + i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); 
+ newfactors.push_back(std::make_shared>( + i, 100, Rot2::fromAngle(M_PI / 2.0), 5.0, brNoise)); + newfactors.push_back(std::make_shared>( + i, 101, Rot2::fromAngle(-M_PI / 2.0), 5.0, brNoise)); + fullgraph.push_back(newfactors); + + Values init; + init.insert((i + 1), Pose2(1.01, 0.01, 0.01)); + init.insert(100, Point2(5.0 / sqrt(2.0), 5.0 / sqrt(2.0))); + init.insert(101, Point2(5.0 / sqrt(2.0), -5.0 / sqrt(2.0))); + fullinit.insert((i + 1), Pose2(1.01, 0.01, 0.01)); + fullinit.insert(100, Point2(5.0 / sqrt(2.0), 5.0 / sqrt(2.0))); + fullinit.insert(101, Point2(5.0 / sqrt(2.0), -5.0 / sqrt(2.0))); + + risam.update(newfactors, init); + ++i; + } + + // Add odometry from time 6 to time 10 + for (; i < 10; ++i) { + NonlinearFactorGraph newfactors; + newfactors.emplace_shared>( + i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); + fullgraph.push_back(newfactors); + + Values init; + init.insert((i + 1), Pose2(double(i + 1) + 0.1, -0.1, 0.01)); + fullinit.insert((i + 1), Pose2(double(i + 1) + 0.1, -0.1, 0.01)); + + risam.update(newfactors, init); + } + + // Add odometry from time 10 to 11 and landmark measurement at time 10 + { + NonlinearFactorGraph newfactors; + newfactors.emplace_shared>( + i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.push_back(std::make_shared>( + i, 100, Rot2::fromAngle((3 * M_PI) / 4.0), 7.07106, brNoise)); + newfactors.push_back(std::make_shared>( + i, 101, Rot2::fromAngle(-(3 * M_PI) / 4.0), 7.07106, brNoise)); + fullgraph.push_back(newfactors); + + // Add an Outlier Measurement at 8 [Is not added to fullGraph] + // newfactors.push_back( + // RISAM::make_graduated>( + // kernel, i, 100, Rot2::fromAngle(0.4), 300, brNoise)); + + Values init; + init.insert((i + 1), Pose2(double(i + 1) + 0.1, 0.1, 0.01)); + fullinit.insert((i + 1), Pose2(double(i + 1) + 0.1, 0.1, 0.01)); + + risam.update(newfactors, init); + ++i; + } + + /** Compare Results **/ + // Compute Actual + risam.update(); + Values actual = risam.calculateEstimate(); + // Compute 
Expected + LevenbergMarquardtParams parameters; + Values expected = + LevenbergMarquardtOptimizer(fullgraph, fullinit, parameters).optimize(); + + std::cout << "Expected Error:" << fullgraph.error(expected) << std::endl; + std::cout << "Actual Error:" << fullgraph.error(actual) << std::endl; + + // Test + CHECK(assert_equal(expected, actual, 0.1)); + CHECK(false); +} + +/* ************************************************************************* + */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } -/* ************************************************************************* */ +/* ************************************************************************* + */ From 8ba9cdcacc8329b859d73a6d6ff6d76effecedf2 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Wed, 4 Feb 2026 22:31:44 -0500 Subject: [PATCH 07/17] Clean riSAM tests and Param constructor --- gtsam/sam/RISAM.h | 32 ++++++++++----- gtsam/sam/tests/testRISAM.cpp | 75 +++++++++++++++++++---------------- 2 files changed, 61 insertions(+), 46 deletions(-) diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index f7d24ce3d9..91fa3c8d2c 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -31,22 +31,34 @@ class RISAM { */ struct Parameters { /// @brief Explicit constructor to use default values - explicit Parameters() = default; + Parameters(ISAM2Params isam2_params = ISAM2Params(), + bool increment_outlier_mu = true, + double outlier_mu_chisq_upper_bound = 0.95, + double outlier_mu_chisq_lower_bound = 0.25, + double outlier_mu_avg_var_convergence_thresh = 0.01, + size_t number_extra_iters = 1) + : isam2_params(isam2_params), + increment_outlier_mu(increment_outlier_mu), + outlier_mu_chisq_upper_bound(outlier_mu_chisq_upper_bound), + outlier_mu_chisq_lower_bound(outlier_mu_chisq_lower_bound), + outlier_mu_avg_var_convergence_thresh( + outlier_mu_avg_var_convergence_thresh), + number_extra_iters(number_extra_iters) {}; /// @brief The parameters for the encapsulated iSAM2 algorithm @Note some /// 
are overriden in RISAM::RISAM() ISAM2Params isam2_params; /// @brief Flag to increment mu_init when the value estimate converges // See RISAM::IncrementMuInit for details - bool increment_outlier_mu{true}; + bool increment_outlier_mu; /// @brief Increment mu_init if chi^2 > upper bound - double outlier_mu_chisq_upper_bound{0.95}; + double outlier_mu_chisq_upper_bound; /// @brief Decrement mu_init if chi^2 < lower bound - double outlier_mu_chisq_lower_bound{0.25}; + double outlier_mu_chisq_lower_bound; /// @brief Average variable delta threshold to identify value convergence - double outlier_mu_avg_var_convergence_thresh{0.01}; + double outlier_mu_avg_var_convergence_thresh; /// @brief The number of extra iterations to perform after mu convergence - size_t number_extra_iters{1}; + size_t number_extra_iters; }; /** @brief Struct containing information about the riSAM update @@ -223,11 +235,9 @@ class RISAM { * @param args: The arguments required to construct the FACTOR_TYPE */ template - static - typename GenericGraduatedFactor::shared_ptr - make_graduated(Args&&... args) { - return std::make_shared< - GenericGraduatedFactor>( + static typename GenericGraduatedFactor::shared_ptr + make_graduated(Args&&... 
args) { + return std::make_shared>( std::forward(args)...); } }; diff --git a/gtsam/sam/tests/testRISAM.cpp b/gtsam/sam/tests/testRISAM.cpp index 159b36b0f4..d3151807e4 100644 --- a/gtsam/sam/tests/testRISAM.cpp +++ b/gtsam/sam/tests/testRISAM.cpp @@ -31,7 +31,7 @@ using namespace std; using namespace gtsam; /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, error) { +TEST(RISAMGraduatedKernel, Error) { SIGKernel kernel(1.0); // At zero error is not dependent on mu CHECK(assert_equal(0.0, kernel.error(0.0, 0.0), 1e-9)); @@ -55,7 +55,7 @@ TEST(RISAMGraduatedKernel, error) { } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, weight) { +TEST(RISAMGraduatedKernel, Weight) { SIGKernel kernel(1.0); // At Mu = 0 weights are identical at 0.5 CHECK(assert_equal(0.5, kernel.weight(0.1, 0.0), 1e-9)); @@ -77,7 +77,7 @@ TEST(RISAMGraduatedKernel, weight) { } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, weightSystem) { +TEST(RISAMGraduatedKernel, WeightSystem) { SIGKernel kernel(1.0); vector A; A.push_back(Matrix::Identity(3, 3)); @@ -89,7 +89,7 @@ TEST(RISAMGraduatedKernel, weightSystem) { } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, muInitInc) { +TEST(RISAMGraduatedKernel, MuInitInc) { SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); CHECK(assert_equal(0.1, kernel.incrementMuInit(0.0), 1e-9)); CHECK(assert_equal(1.0, kernel.incrementMuInit(1.0), 1e-9)); @@ -99,14 +99,14 @@ TEST(RISAMGraduatedKernel, muInitInc) { } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, isMuConverged) { +TEST(RISAMGraduatedKernel, IsMuConverged) { SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); CHECK(!kernel.isMuConverged(0.5)); CHECK(kernel.isMuConverged(1.0)); } /* 
************************************************************************* */ -TEST(RISAMGraduatedKernel, muUpdateMcGann2023) { +TEST(RISAMGraduatedKernel, MuUpdateMcGann2023) { // Standard Sequence CHECK(assert_equal(0.12, SIGKernel::muUpdateMcGann2023(0.0, 0, 0), 1e-9)); CHECK(assert_equal(0.384, SIGKernel::muUpdateMcGann2023(0.12, 0, 1), 1e-9)); @@ -115,7 +115,7 @@ TEST(RISAMGraduatedKernel, muUpdateMcGann2023) { } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, muUpdateStable) { +TEST(RISAMGraduatedKernel, MuUpdateStable) { // Standard Sequence CHECK(assert_equal(0.5, SIGKernel::muUpdateStable(0.0, 0.0, 0), 1e-9)); CHECK(assert_equal(0.9, SIGKernel::muUpdateStable(0.5, 0.0, 1), 1e-9)); @@ -131,7 +131,7 @@ TEST(RISAMGraduatedKernel, muUpdateStable) { } /* ************************************************************************* */ -TEST(RISAMGraduatedFactor, linearize) { +TEST(RISAMGraduatedFactor, Linearize) { SIGKernel::shared_ptr kernel = std::make_shared(1.0); NonlinearFactor::shared_ptr factor = RISAM::make_graduated>(kernel, 0, 0.0, @@ -147,7 +147,7 @@ TEST(RISAMGraduatedFactor, linearize) { } /* ************************************************************************* */ -TEST(RISAMGraduatedFactor, error) { +TEST(RISAMGraduatedFactor, Error) { SIGKernel::shared_ptr kernel = std::make_shared(1.0); NonlinearFactor::shared_ptr factor = RISAM::make_graduated>(kernel, 0, 0.0, @@ -163,22 +163,21 @@ TEST(RISAMGraduatedFactor, error) { } /* ************************************************************************* */ -TEST(RISAM, GaussianRISAMNoOutliers) { +TEST(RISAM, RISAMIntegrationTest) { SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas( (Vector(3) << 0.1, 0.1, M_PI / 100.0).finished()); SharedDiagonal brNoise = noiseModel::Diagonal::Sigmas((Vector(2) << M_PI / 100.0, 0.1).finished()); - // These variables will be reused and accumulate factors and values + // Setup the Solver 
SIGKernel::shared_ptr kernel = std::make_shared( SIGKernel::shapeParamFromInfThresh(0.1, 3, 0.95)); RISAM::Parameters params; - ISAM2DoglegParams dlparams; - dlparams.verbose = true; - dlparams.initialDelta = 0.01; - params.isam2_params = ISAM2Params(dlparams); // ISAM2DoglegLineSearchParams(0.02, - // 1.0, 1.5, 1e-3, 1e-4, true)); - ISAM2 risam(params.isam2_params); + params.isam2_params = ISAM2Params( + ISAM2DoglegLineSearchParams(0.02, 1.0, 1.5, 1e-3, 1e-4, false)); + RISAM risam(params); + + // Setup Container for the full problem Values fullinit; NonlinearFactorGraph fullgraph; @@ -217,10 +216,12 @@ TEST(RISAM, GaussianRISAMNoOutliers) { NonlinearFactorGraph newfactors; newfactors.emplace_shared>( i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors.push_back(std::make_shared>( - i, 100, Rot2::fromAngle(M_PI / 2.0), 5.0, brNoise)); - newfactors.push_back(std::make_shared>( - i, 101, Rot2::fromAngle(-M_PI / 2.0), 5.0, brNoise)); + newfactors.push_back( + RISAM::make_graduated>( + kernel, i, 100, Rot2::fromAngle(M_PI / 2.0), 5.0, brNoise)); + newfactors.push_back( + RISAM::make_graduated>( + kernel, i, 101, Rot2::fromAngle(-M_PI / 2.0), 5.0, brNoise)); fullgraph.push_back(newfactors); Values init; @@ -254,16 +255,20 @@ TEST(RISAM, GaussianRISAMNoOutliers) { NonlinearFactorGraph newfactors; newfactors.emplace_shared>( i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors.push_back(std::make_shared>( - i, 100, Rot2::fromAngle((3 * M_PI) / 4.0), 7.07106, brNoise)); - newfactors.push_back(std::make_shared>( - i, 101, Rot2::fromAngle(-(3 * M_PI) / 4.0), 7.07106, brNoise)); + newfactors.push_back( + RISAM::make_graduated>( + kernel, i, 100, Rot2::fromAngle((3 * M_PI) / 4.0), 7.07106, + brNoise)); + newfactors.push_back( + RISAM::make_graduated>( + kernel, i, 101, Rot2::fromAngle(-(3 * M_PI) / 4.0), 7.07106, + brNoise)); fullgraph.push_back(newfactors); - // Add an Outlier Measurement at 8 [Is not added to fullGraph] - // newfactors.push_back( - // 
RISAM::make_graduated>( - // kernel, i, 100, Rot2::fromAngle(0.4), 300, brNoise)); + // Add an Outlier Measurement [is not added to fullGraph] + newfactors.push_back( + RISAM::make_graduated>( + kernel, i, 100, Rot2::fromAngle(0.4), 300, brNoise)); Values init; init.insert((i + 1), Pose2(double(i + 1) + 0.1, 0.1, 0.01)); @@ -277,17 +282,17 @@ TEST(RISAM, GaussianRISAMNoOutliers) { // Compute Actual risam.update(); Values actual = risam.calculateEstimate(); + std::set actual_outliers = risam.getOutliers(0.95); // Compute Expected LevenbergMarquardtParams parameters; Values expected = LevenbergMarquardtOptimizer(fullgraph, fullinit, parameters).optimize(); - std::cout << "Expected Error:" << fullgraph.error(expected) << std::endl; - std::cout << "Actual Error:" << fullgraph.error(actual) << std::endl; - - // Test - CHECK(assert_equal(expected, actual, 0.1)); - CHECK(false); + // Test Solution Quality + CHECK(assert_equal(expected, actual, 0.01)); + // Test Outlier Identification + CHECK(1 == actual_outliers.size()); + CHECK(fullgraph.size() == *(actual_outliers.begin())); } /* ************************************************************************* From fcec2c52323d9515d1989665f69d0444f872cc56 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sat, 14 Feb 2026 10:52:34 -0500 Subject: [PATCH 08/17] Update chi2 naming convention --- gtsam/sam/RISAM.cpp | 6 +++--- gtsam/sam/RISAMGraduatedKernel.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index e9dbebb9d5..edd2f16195 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -171,7 +171,7 @@ std::set RISAM::getOutliers(double chi2_outlier_thresh) { std::dynamic_pointer_cast(nlf_ptr); if (grad_ptr) { const double thresh = - internal::chi_squared_quantile(nlf_ptr->dim(), chi2_outlier_thresh); + internal::chiSquaredQuantile(nlf_ptr->dim(), chi2_outlier_thresh); const double residual = grad_ptr->residual(current_est); if (residual > thresh) 
outlier_factors.insert(i); } @@ -251,9 +251,9 @@ void RISAM::incrementMuInits() { std::dynamic_pointer_cast(factors_[fidx]); double mahdist = grad_factor->residual(theta); - const double mah_upper_bound = internal::chi_squared_quantile( + const double mah_upper_bound = internal::chiSquaredQuantile( factors_[fidx]->dim(), params_.outlier_mu_chisq_upper_bound); - const double mah_lower_bound = internal::chi_squared_quantile( + const double mah_lower_bound = internal::chiSquaredQuantile( factors_[fidx]->dim(), params_.outlier_mu_chisq_lower_bound); if (mahdist > mah_upper_bound) { diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp index fa4fe8406f..4787ca0d0e 100644 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -45,7 +45,7 @@ bool SIGKernel::isMuConverged(const double& mu) const { double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, double chi2_outlier_thresh) { double outlier_residual_thresh = - internal::chi_squared_quantile(dof, chi2_outlier_thresh); + internal::chiSquaredQuantile(dof, chi2_outlier_thresh); // Equation from taking derivative of SIGKernel setting equal to // influence_thresh and solving for mu=1 const double t1 = @@ -57,7 +57,7 @@ double SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, /* ************************************************************************* */ double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { - return internal::chi_squared_quantile(dof, chi2_threshold); + return internal::chiSquaredQuantile(dof, chi2_threshold); } /* ************************************************************************* */ From ea4633fd03ff7f221851232ac4b3b87146ae0710 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sat, 14 Feb 2026 12:13:27 -0500 Subject: [PATCH 09/17] Fix formating in risam additions --- gtsam/sam/RISAMGraduatedKernel.cpp | 2 +- gtsam/sam/RISAMGraduatedKernel.h | 5 +++-- 2 files changed, 4 
insertions(+), 3 deletions(-) diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp index 4787ca0d0e..627f97ab1a 100644 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ b/gtsam/sam/RISAMGraduatedKernel.cpp @@ -33,7 +33,7 @@ double SIGKernel::incrementMuInit(const double& mu) const { /* ************************************************************************* */ double SIGKernel::incrementMuInitInv(const double& mu) const { - return std::max(0.0, mu - this->mu_init_increment); + return std::max(0.0, mu - this->mu_init_increment); } /* ************************************************************************* */ diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index 557a9f00da..d12de1beda 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -124,8 +124,9 @@ class SIGKernel : public GraduatedKernel { * @param mu_update_strat: The update strategy to use for mu updates * @param mu_init_increment: The amount to increment/decrement mu init */ - SIGKernel(double shape_param, MuUpdateStrategy mu_update_strat=muUpdateStable, - double mu_init_increment=0.2) + SIGKernel(double shape_param, + MuUpdateStrategy mu_update_strat = muUpdateStable, + double mu_init_increment = 0.2) : GraduatedKernel(0.0, 1.0), shape_param(shape_param), mu_update_strat(mu_update_strat), From 71bdf37044a0461a5ffb2e2ff3b7fa95613220f6 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Mon, 2 Mar 2026 23:17:31 -0500 Subject: [PATCH 10/17] Updates for review changes to isam2 updates --- gtsam/sam/RISAM.cpp | 3 ++- gtsam/sam/tests/testRISAM.cpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index edd2f16195..2d60e58740 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -277,7 +277,8 @@ std::set RISAM::convexifyInvolvedFactors( gtsam::ISAM2UpdateParams& update_params, UpdateResult& update_result) { // Gather all involved keys - Directly induced 
by the new factors gtsam::KeySet new_factor_keys = new_factors.keys(); - gtsam::KeySet involved_keys = solver_->traverseTop(new_factors.keyVector()); + gtsam::KeySet involved_keys = + solver_->collectAffectedKeys(new_factors.keyVector()); involved_keys.insert(new_factor_keys.begin(), new_factor_keys.end()); // Add to the gathered involved keys, any keys specified by the user diff --git a/gtsam/sam/tests/testRISAM.cpp b/gtsam/sam/tests/testRISAM.cpp index d3151807e4..fc330ed534 100644 --- a/gtsam/sam/tests/testRISAM.cpp +++ b/gtsam/sam/tests/testRISAM.cpp @@ -174,7 +174,7 @@ TEST(RISAM, RISAMIntegrationTest) { SIGKernel::shapeParamFromInfThresh(0.1, 3, 0.95)); RISAM::Parameters params; params.isam2_params = ISAM2Params( - ISAM2DoglegLineSearchParams(0.02, 1.0, 1.5, 1e-3, 1e-4, false)); + ISAM2DoglegLineSearchParams(0.02, 1.0, 1.5, 1e-3, false, 1e-4)); RISAM risam(params); // Setup Container for the full problem From 7e7155dff7f4cf71c3762659c929b5bde07bf2ae Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Mon, 2 Mar 2026 23:23:57 -0500 Subject: [PATCH 11/17] Address co-pilot review --- gtsam/sam/RISAM.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index 2d60e58740..c7dd528372 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -199,6 +199,7 @@ void RISAM::updateHouseKeeping(const gtsam::NonlinearFactorGraph& new_factors, for (const auto fidx : update_params.removeFactorIndices) { removed_factors.push_back(factors_.at(fidx)); factors_.remove(fidx); + factors_to_check_status_.erase(fidx); mu_.at(fidx).reset(); mu_inits_.at(fidx).reset(); } @@ -240,8 +241,9 @@ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, void RISAM::incrementMuInits() { // Compute Average Delta gtsam::VectorValues delta = solver_->getDelta(); - bool is_sufficient_delta = delta.norm() / delta.size() < - params_.outlier_mu_avg_var_convergence_thresh; + bool is_sufficient_delta = + (delta.size() > 0) && 
(delta.norm() / delta.size() < + params_.outlier_mu_avg_var_convergence_thresh); // Evaluate All Graduated factors if sufficient average delta if (is_sufficient_delta) { From 695168eab1d089095b09b732dfb8638dd372027e Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 15 Mar 2026 21:35:09 -0400 Subject: [PATCH 12/17] Fix shared_ptr compile issue --- gtsam/sam/RISAMGraduatedKernel.h | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index d12de1beda..f00f1a3f8e 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -9,6 +9,7 @@ * @date Mar 2022 */ #pragma once +#include #include #include #include From 5a86d3b9262a0e50933665fc9859088ba7e97e21 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 15 Mar 2026 21:35:32 -0400 Subject: [PATCH 13/17] Add RISAM notebook docs --- gtsam/nonlinear/doc/RISAM.ipynb | 127 ++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 gtsam/nonlinear/doc/RISAM.ipynb diff --git a/gtsam/nonlinear/doc/RISAM.ipynb b/gtsam/nonlinear/doc/RISAM.ipynb new file mode 100644 index 0000000000..d99d3f74c3 --- /dev/null +++ b/gtsam/nonlinear/doc/RISAM.ipynb @@ -0,0 +1,127 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c950beef", + "metadata": {}, + "source": [ + "# RISAM - Robust Incremental Smoothing and Mapping\n", + "\n", + "## Overview\n", + "\n", + "The `RISAM` class in GTSAM is designed to perform online robust optimization using an incrementalized version of [Graduated Non-Convexity](GNCOptimizer.ipynb) built on the [ISAM2](ISAM2.ipynb) algorithm. This method is intended for scenarios where the incremental optimization problem is affected by outliers. 
In cases where all measurements are known to be inliers `RISAM` functions identically to `ISAM2`, however, when potential outliers are incorporated `RISAM` applies an incremental GNC step to the effected problem improving robustness over standard ISAM2 but preventing sensitivity to initialization found in M-Estimation approaches.\n", + "\n", + "Like the `GNCOptimizer`, `RISAM` leverages a robust cost function $\\rho(e)$, where $e$ is the error term. The goal is to minimize the sum of these robust costs over all measurements:\n", + "\n", + "$$\n", + "\\min_x \\sum_i \\rho(e_i(x))\n", + "$$\n", + "\n", + "Unlike the batch setting that `GNCOptimzier` is used for, `RISAM` targets the incremental problem where we incrementally incorporate measurements online.\n", + "\n", + "$$\n", + "\\min_{x^t} \\sum_i \\rho(e_i(x^t))\n", + "$$\n", + "\n", + "Where we re-solve only a small subproblem at each step that is affected by the new measurements. `RISAM` solves this sub-problem robustly by solving a continuation of problems defined by graduated robust kernel $\\rho(e, \\mu)$ where the control parameter $\\mu$ smoothly transitions the kernel from quadratic ($x^2$) to a robust loss.\n", + "\n", + "$$\n", + "\\rho(e^t, \\mu)\n", + "$$\n", + "\n", + "By starting with non-robust error `RISAM` better handles poor-initialization, and by transitioning to the final robust loss `RISAM` removes the influence of outliers.\n", + "\n", + "Key features:\n", + "\n", + "- **Online Robust Optimization**: `RISAM` is designed to support incremental optimization problems with outliers, using a robust cost function that can mitigate their effects.\n", + "- **Incremental Graduated Non-Convexity**: This technique allows the optimizer to solve each incremental subproblem with a convex problem and gradually transform it into the original non-convex problem, which helps in avoiding local minima.\n", + "\n", + "## Key Methods + Classes\n", + "\n", + "`RISAM` is designed to be a drop-in replacement for 
ISAM2. To see details on its key methods see [ISAM2.ipynb](ISAM2.ipynb).\n", + "\n", + "Additional Key Methods:\n", + "* `getOutliers`: Returns the set of measurements currently classified as outliers.\n", + "\n", + "Additional Key Helpers + Classes\n", + "* `make_graduated`: Constructs any factor as a `GraduatedFactor` which identifies factors as possible outliers to `RISAM`. All other factors are treated as known inliers.\n", + "* `SIGKernel`: The suggested kernel to use with `GraduatedFactors` provides stable optimization performance.\n", + "\n", + "## Parameters\n", + "\n", + "The `RISAM::Parameters` class defines parameters specific to `RISAM`:\n", + "\n", + "| Parameter | Type | Default Value | Description |\n", + "|-----------|------|---------------|-------------|\n", + "| isam2_params | ISAM2Params | ISAM2Params() | The parameters for the encapsulated ISAM2 optimizer. It is recommended to use DogLegLineSearch for optimization. |\n", + "| increment_outlier_mu | bool | true | Whether to increment the initial value of $\\mu$ used for each incremental GNC update over time as certainty of their inlier/outlier status increases. |\n", + "| outlier_mu_chisq_upper_bound | double | 0.95 | The $\\chi^2$ threshold for factor residual to consider it an outlier for $\\mu_{init}$ updates.. |\n", + "| outlier_mu_chisq_lower_bound | double | 0.25| The $\\chi^2$ threshold for factor residual to consider it strong inlier for $\\mu_{init}$ updates. |\n", + "| outlier_mu_avg_var_convergence_thresh | double | 0.01 | The threshold average variable delta to initiate $\\mu_{init}$ updates. |\n", + "| number_extra_iters | size_t | 1 | The number of extra `ISAM2::updates` called internally at each iteration after converging to the fully robust problem. 
|\n", + "\n", + "## Usage Considerations\n", + "\n", + "- **Outlier Rejection**: `RISAM` is particularly effective in online scenarios with significant outlier presence, such as online SLAM.\n", + "- **Trust Region Optimization**: While `RISAM` can use any underlying optimization step methods supported by ISAM2 it is strongly recommended to use `DoglegLineSearch` as it accounts for the changes in problem structure (convexity changes) and prevents divergence through trust region steps.\n", + "\n", + "## Files\n", + "\n", + "- [RISAM.h](https://github.com/borglab/gtsam/blob/develop/gtsam/sam/RISAM.h)\n", + "- [RISAMGraduatedFactor.h](https://github.com/borglab/gtsam/blob/develop/gtsam/sam/RISAMGraduatedFactor.h)\n", + "- [RISAMGraduatedKernel.h](https://github.com/borglab/gtsam/blob/develop/gtsam/sam/RISAMGraduatedKernel.h)" + ] + }, + { + "cell_type": "markdown", + "id": "colab_button", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "id": "license_cell", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "colab_import", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "try:\n", + " import google.colab\n", + " %pip install --quiet gtsam-develop\n", + "except ImportError:\n", + " pass" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 86c9099fadd129992d4a66736d6984abdfc16859 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Tue, 24 Mar 2026 23:54:13 -0400 Subject: [PATCH 14/17] Update Doxygen Regions --- gtsam/sam/RISAM.h | 20 ++++++++++++++----- gtsam/sam/RISAMGraduatedFactor.h | 24 +++++++++++++++++------ gtsam/sam/RISAMGraduatedKernel.h | 33 +++++++++++++++++++++++--------- 3 files changed, 57 insertions(+), 20 deletions(-) diff --git a/gtsam/sam/RISAM.h b/gtsam/sam/RISAM.h index 91fa3c8d2c..09102b211e 100644 --- a/gtsam/sam/RISAM.h +++ b/gtsam/sam/RISAM.h @@ -24,7 +24,8 @@ namespace gtsam { class RISAM { - /** TYPES **/ + /// @name Types + /// @{ public: /** @brief Struct Containing all configuration parameters for riSAM * See below for details on each parameters @@ -76,8 +77,10 @@ class RISAM { /// @brief The set of factors convexified in this update std::set convexified_factors; }; + /// @} - /** FIELDS **/ + /// @name Fields + /// @{ protected: /// @brief Configuration parameters for the riSAM algorithm Parameters params_; @@ -106,8 +109,10 @@ class RISAM { /// @brief The Factors for the underlying system /// INVARIANT: matches that of solver_ after every update NonlinearFactorGraph factors_; + /// @} - /** INTERFACE **/ + /// @name Public Interface + /// @{ public: /** @brief Constructs an instance of the riSAM algorithm with provided * configuration. Note this constructor will override some values in @@ -173,8 +178,10 @@ class RISAM { * algorithm. 
*/ std::set getOutliers(double chi2_outlier_thresh); + /// @} - /** HELPERS **/ + /// @name Private Interface + /// @{ protected: /** @brief Preforms a robust update to the system. * A robust update is required any time new_factors contains GraduatedFactors @@ -226,9 +233,11 @@ class RISAM { const NonlinearFactorGraph& new_factors, const Values& new_theta, const std::optional> extra_gnc_involved_keys, ISAM2UpdateParams& internal_update_params, UpdateResult& update_result); + /// @} + /// @name Static Helpers + /// @{ public: - /** STATIC HELPERS **/ /** @brief Constructs a shared_ptr of FACTOR_TYPE graduated factor for riSAM * This identifies a factor as a potential outlier measurement. * @param kernel: The graduated kernel to use for this factor @@ -240,6 +249,7 @@ class RISAM { return std::make_shared>( std::forward(args)...); } + /// @} }; } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index f42ef6d3a0..40a2edce49 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -24,21 +24,26 @@ namespace gtsam { /// @brief Graduated Factor for riSAM base class class GraduatedFactor { - /** TYPES **/ + /// @name Types + /// @{ public: typedef std::shared_ptr shared_ptr; + /// @} - /** FIELDS **/ + /// @name Fields + /// @{ protected: /// @brief The Graduated Robust Kernel for this factor GraduatedKernel::shared_ptr kernel_; /// @brief The unique mu control parameter for this factor std::shared_ptr mu_; + /// @} /// Befriend RISAM to give access to these protected values friend class RISAM; - /**INTERFACE**/ + /// @name Public Interface + /// @{ public: /** @brief Constructor * @param kernel: The graduated kernel to apply to this factor @@ -73,6 +78,7 @@ class GraduatedFactor { /// @brief Copies this factor as an instance of its base type without the /// graduated kernel virtual gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const = 0; + /// @} }; /// 
@brief Instantiation of Graduated Factor wrapping any Nonlinear Factor @@ -82,11 +88,14 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { "GraduatedFactor Must be instantiated with a Factor Derived " "from gtsam::NonlinearFactor."); - /** TYPES **/ + /// @name Types + /// @{ public: typedef std::shared_ptr> shared_ptr; + /// @} - /**INTERFACE**/ + /// @name Factor Interface + /// @{ public: /** @brief Constructor * @param kernel: The graduated kernel to use for this factor @@ -153,8 +162,10 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { double error(const gtsam::Values& values) const override { return 0.5 * std::pow(robustResidual(values), 2); } + /// @} - /** GRADUATED INTERFACE **/ + /// @name Graduated Interface + /// @{ /// @brief See GraduatedFactor::residual double residual(const gtsam::Values& current_estimate) const override { return sqrt(2.0 * FACTOR_TYPE::error(current_estimate)); @@ -169,5 +180,6 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const override { return FACTOR_TYPE::clone(); } + /// @} }; } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h index f00f1a3f8e..d1dc6ffce2 100644 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ b/gtsam/sam/RISAMGraduatedKernel.h @@ -9,11 +9,11 @@ * @date Mar 2022 */ #pragma once -#include #include #include #include +#include #include namespace gtsam { @@ -22,18 +22,23 @@ namespace gtsam { * Advanced users can write their own kernels by inheriting from this class */ class GraduatedKernel { - /** TYPES **/ + /// @name Types + /// @{ public: typedef std::shared_ptr shared_ptr; + /// @} - /** FIELDS **/ + /// @name Fields + /// @{ protected: /// @brief The initial Value for mu const double mu_init_; /// @brief The threshold at which to consider mu to be converged const double convergence_thresh_; + /// 
@} - /** INTERFACE **/ + /// @name Public Interface + /// @{ public: GraduatedKernel(double mu_init, double convergence_thresh) : mu_init_(mu_init), convergence_thresh_(convergence_thresh) {} @@ -92,6 +97,7 @@ class GraduatedKernel { /// @brief Returns true iff the value of \mu has converged to a non-convex /// state virtual bool isMuConverged(const double& mu) const = 0; + /// @} }; /* ************************************************************************* */ @@ -103,14 +109,17 @@ class GraduatedKernel { * where c is the shape parameter and r is the residual of the factor */ class SIGKernel : public GraduatedKernel { - /** TYPES **/ + /// @name Types + /// @{ public: /// @brief Shortcut for shared pointer typedef std::shared_ptr shared_ptr; /// @brief Function type for mu update sequence typedef std::function MuUpdateStrategy; + /// @} - /** FIELDS **/ + /// @name Fields + /// @{ public: /// @brief The shape parameter of the SIG kernel 'c' double shape_param; @@ -119,7 +128,12 @@ class SIGKernel : public GraduatedKernel { /// @brief The amount to increment/decrement mu init if the factor is a strong /// inlier/outlier when values converge double mu_init_increment; + /// @} + + /// @name Public Interface + /// @{ + public: /** @brief Individual Parameter Constructor * @param shape_param: The shape parameter for the kernel * @param mu_update_strat: The update strategy to use for mu updates @@ -133,8 +147,6 @@ class SIGKernel : public GraduatedKernel { mu_update_strat(mu_update_strat), mu_init_increment(mu_init_increment) {} - /** Interface **/ - public: /// @brief @see GraduatedKernel double error(const double& residual, const double& mu) const override; /// @brief @see GraduatedKernel @@ -148,8 +160,10 @@ class SIGKernel : public GraduatedKernel { double incrementMuInitInv(const double& mu) const override; /// @brief @see GraduatedKernel bool isMuConverged(const double& mu) const override; + /// @} - /** STATIC HELPERS **/ + /// @name Static Helpers + /// @{ 
public: /** @brief Computes a shape param based on an an influence threshold for * outliers Computes a shape param such that an outlier (any measurement with @@ -187,5 +201,6 @@ class SIGKernel : public GraduatedKernel { /// algorithm was published static double muUpdateStable(const double& mu, const double& residual, const size_t& update_count); + /// @} }; } // namespace gtsam \ No newline at end of file From 64ce7544e370c3276cf10fefac1bb4e554310b7f Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Tue, 24 Mar 2026 23:56:03 -0400 Subject: [PATCH 15/17] Add graduated weight/loss to robust lost functions --- gtsam/linear/LossFunctions.cpp | 312 +++++++++++++++++++++++++-------- gtsam/linear/LossFunctions.h | 170 +++++++++++++++--- gtsam/nonlinear/GncOptimizer.h | 6 +- 3 files changed, 395 insertions(+), 93 deletions(-) diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp index 616d8a696d..f5b8867393 100644 --- a/gtsam/linear/LossFunctions.cpp +++ b/gtsam/linear/LossFunctions.cpp @@ -155,6 +155,14 @@ double Fair::loss(double distance) const { return c_2 * (normalizedError - std::log1p(normalizedError)); } +double Fair::graduatedWeight(double /*error*/, double /*error*/) const { + throw std::logic_error("Fair loss has no graduated form."); +} + +double Fair::graduatedLoss(double /*error*/, double /*error*/) const { + throw std::logic_error("Fair loss has no graduated form."); +} + void Fair::print(const std::string &s="") const { cout << s << "fair (" << c_ << ")" << endl; } @@ -177,20 +185,32 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) { } } -double Huber::weight(double distance) const { +double Huber::Weight(double k, double distance) { const double absError = std::abs(distance); - return (absError <= k_) ? (1.0) : (k_ / absError); + return (absError <= k) ? 
(1.0) : (k / absError); } -double Huber::loss(double distance) const { +double Huber::Loss(double k, double distance) { const double absError = std::abs(distance); - if (absError <= k_) { // |x| <= k - return distance*distance / 2; - } else { // |x| > k - return k_ * (absError - (k_/2)); + if (absError <= k) { // |x| <= k + return distance * distance / 2; + } else { // |x| > k + return k * (absError - (k / 2)); } } +double Huber::weight(double distance) const { return Weight(k_, distance); } + +double Huber::loss(double distance) const { return Loss(k_, distance); } + +double Huber::graduatedWeight(double distance, double mu) const { + return Weight(mu * k_, distance); +} + +double Huber::graduatedLoss(double distance, double mu) const { + return Loss(mu * k_, distance); +} + void Huber::print(const std::string &s="") const { cout << s << "huber (" << k_ << ")" << endl; } @@ -215,13 +235,27 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), } } +double Cauchy::Weight(double ksquared, double distance) { + return ksquared / (ksquared + distance * distance); +} + +double Cauchy::Loss(double ksquared, double distance) { + const double val = std::log1p(distance * distance / ksquared); + return ksquared * val * 0.5; +} + double Cauchy::weight(double distance) const { - return ksquared_ / (ksquared_ + distance*distance); + return Weight(ksquared_, distance); } -double Cauchy::loss(double distance) const { - const double val = std::log1p(distance * distance / ksquared_); - return ksquared_ * val * 0.5; +double Cauchy::loss(double distance) const { return Loss(ksquared_, distance); } + +double Cauchy::graduatedWeight(double distance, double mu) const { + return Cauchy::Weight((mu * mu) * ksquared_, distance); +} + +double Cauchy::graduatedLoss(double distance, double mu) const { + return Cauchy::Loss((mu * mu) * ksquared_, distance); } void Cauchy::print(const std::string &s="") const { @@ -248,25 +282,41 @@ Tukey::Tukey(double c, const 
ReweightScheme reweight) : Base(reweight), c_(c), c } } -double Tukey::weight(double distance) const { - if (std::abs(distance) <= c_) { - const double one_minus_xc2 = 1.0 - distance*distance/csquared_; +double Tukey::Weight(double c, double csquared, double distance) { + if (std::abs(distance) <= c) { + const double one_minus_xc2 = 1.0 - distance * distance / csquared; return one_minus_xc2 * one_minus_xc2; } return 0.0; } -double Tukey::loss(double distance) const { - double absError = std::abs(distance); - if (absError <= c_) { - const double one_minus_xc2 = 1.0 - distance*distance/csquared_; - const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2; - return csquared_ * (1 - t) / 6.0; +double Tukey::Loss(double c, double csquared, double distance) { + const double absError = std::abs(distance); + if (absError <= c) { + const double one_minus_xc2 = 1.0 - distance * distance / csquared; + const double t = one_minus_xc2 * one_minus_xc2 * one_minus_xc2; + return csquared * (1 - t) / 6.0; } else { - return csquared_ / 6.0; + return csquared / 6.0; } } +double Tukey::weight(double distance) const { + return Weight(c_, csquared_, distance); +} + +double Tukey::loss(double distance) const { + return Loss(c_, csquared_, distance); +} + +double Tukey::graduatedWeight(double distance, double mu) const { + return Weight(mu * c_, (mu * mu) * csquared_, distance); +} + +double Tukey::graduatedLoss(double distance, double mu) const { + return Loss(mu * c_, (mu * mu) * csquared_, distance); +} + void Tukey::print(const std::string &s="") const { std::cout << s << ": Tukey (" << c_ << ")" << std::endl; } @@ -287,14 +337,28 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) { Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {} -double Welsch::weight(double distance) const { - const double xc2 = (distance*distance)/csquared_; +double Welsch::Weight(double csquared, double distance) { + const double xc2 = 
(distance * distance) / csquared; return std::exp(-xc2); } -double Welsch::loss(double distance) const { - const double xc2 = (distance*distance)/csquared_; - return csquared_ * 0.5 * -std::expm1(-xc2); +double Welsch::Loss(double csquared, double distance) { + const double xc2 = (distance * distance) / csquared; + return csquared * 0.5 * -std::expm1(-xc2); +} + +double Welsch::weight(double distance) const { + return Weight(csquared_, distance); +} + +double Welsch::loss(double distance) const { return Loss(csquared_, distance); } + +double Welsch::graduatedWeight(double distance, double mu) const { + return Weight((mu * mu) * csquared_, distance); +} + +double Welsch::graduatedLoss(double distance, double mu) const { + return Loss((mu * mu) * csquared_, distance); } void Welsch::print(const std::string &s="") const { @@ -314,23 +378,47 @@ Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) { /* ************************************************************************* */ // GemanMcClure /* ************************************************************************* */ -GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight) - : Base(reweight), c_(c), csquared_(c * c) { +GemanMcClure::GemanMcClure(double c, const GemanMcClure::GradScheme graduation, + const ReweightScheme reweight) + : Base(reweight), c_(c), csquared_(c * c), graduation_(graduation) {} + +double GemanMcClure::Weight(double csquared, double distance) { + const double c4 = csquared * csquared; + const double c2error = csquared + (distance * distance); + return c4 / (c2error * c2error); } -double GemanMcClure::weight(double distance) const { - return Weight(distance*distance, csquared_); +double GemanMcClure::Loss(double csquared, double distance) { + const double error2 = distance * distance; + return 0.5 * (csquared * error2) / (csquared + error2); } -double GemanMcClure::Weight(double distance2, double c2) { - const double c4 = c2*c2; - const double c2error = c2 + 
distance2; - return c4/(c2error*c2error); +double GemanMcClure::weight(double distance) const { + return Weight(csquared_, distance); } double GemanMcClure::loss(double distance) const { - const double error2 = distance*distance; - return 0.5 * (csquared_ * error2) / (csquared_ + error2); + return Loss(csquared_, distance); +} + +double GemanMcClure::graduatedWeight(double distance, double mu) const { + if (graduation_ == GemanMcClure::GradScheme::STANDARD) { + return Weight(mu * csquared_, distance); + } else { // GemanMcClure::GradScheme::SCALE_INVARIANT + const double d2 = distance * distance; + const double sqrt_denom = csquared_ + std::pow(d2, mu); + return (csquared_ * (csquared_ + std::pow(d2, mu) * (1 - mu))) / + (sqrt_denom * sqrt_denom); + } +} + +double GemanMcClure::graduatedLoss(double distance, double mu) const { + if (graduation_ == GemanMcClure::GradScheme::STANDARD) { + return Loss(mu * csquared_, distance); + } else { // GemanMcClure::GradScheme::SCALE_INVARIANT + const double d2 = distance * distance; + return 0.5 * (csquared_ * d2) / (csquared_ + std::pow(d2, mu)); + } } void GemanMcClure::print(const std::string &s="") const { @@ -343,8 +431,10 @@ bool GemanMcClure::equals(const Base &expected, double tol) const { return std::abs(c_ - p->c_) < tol; } -GemanMcClure::shared_ptr GemanMcClure::Create(double c, const ReweightScheme reweight) { - return shared_ptr(new GemanMcClure(c, reweight)); +GemanMcClure::shared_ptr GemanMcClure::Create( + double c, const GemanMcClure::GradScheme graduation, + const ReweightScheme reweight) { + return shared_ptr(new GemanMcClure(c, graduation, reweight)); } /* ************************************************************************* */ @@ -358,22 +448,43 @@ TruncatedLeastSquares::TruncatedLeastSquares(double c, const ReweightScheme rewe } } -double TruncatedLeastSquares::weight(double distance) const { - const auto w = Weight(distance * distance, csquared_, csquared_); - return w.value(); +double 
TruncatedLeastSquares::Weight(double c, double distance) { + if (std::abs(distance) <= c) { + return 1.0; + } + return 0.0; } -std::optional TruncatedLeastSquares::Weight(double distance2, double lowerbound, double upperbound) { +double TruncatedLeastSquares::Loss(double c, double csquared, double distance) { + if (std::abs(distance) <= c) { + return 0.5 * distance * distance; + } + return 0.5 * csquared; +} + +std::optional TruncatedLeastSquares::GNCWeight(double distance2, + double lowerbound, + double upperbound) { if (distance2 <= lowerbound) return 1.0; if (distance2 >= upperbound) return 0.0; return std::nullopt; } +double TruncatedLeastSquares::weight(double distance) const { + return Weight(c_, distance); +} + double TruncatedLeastSquares::loss(double distance) const { - if (std::abs(distance) <= c_) { - return 0.5 * distance * distance; - } - return 0.5 * csquared_; + return Loss(c_, csquared_, distance); +} + +double TruncatedLeastSquares::graduatedWeight(double distance, + double mu) const { + return Weight(mu * c_, distance); +} + +double TruncatedLeastSquares::graduatedLoss(double distance, double mu) const { + return Loss(mu * c_, (mu * mu) * csquared_, distance); } void TruncatedLeastSquares::print(const std::string &s="") const { @@ -397,25 +508,34 @@ DCS::DCS(double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } -double DCS::weight(double distance) const { - const double e2 = distance*distance; - if (e2 > c_) - { - const double w = 2.0*c_/(c_ + e2); - return w*w; +double DCS::Weight(double c, double distance) { + const double e2 = distance * distance; + if (e2 > c) { + const double w = 2.0 * c / (c + e2); + return w * w; } - return 1.0; } -double DCS::loss(double distance) const { +double DCS::Loss(double c, double distance) { // This is the simplified version of Eq 9 from (Agarwal13icra) // after you simplify and cancel terms. 
- const double e2 = distance*distance; - const double e4 = e2*e2; - const double c2 = c_*c_; + const double e2 = distance * distance; + const double e4 = e2 * e2; + const double c2 = c * c; + return (c2 * e2 + c * e4) / ((e2 + c) * (e2 + c)); +} + +double DCS::weight(double distance) const { return Weight(c_, distance); } + +double DCS::loss(double distance) const { return Loss(c_, distance); } + +double DCS::graduatedWeight(double distance, double mu) const { + return Weight(mu * c_, distance); +} - return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_)); +double DCS::graduatedLoss(double distance, double mu) const { + return Loss(mu * c_, distance); } void DCS::print(const std::string &s="") const { @@ -457,6 +577,15 @@ double L2WithDeadZone::loss(double distance) const { return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error); } +double L2WithDeadZone::graduatedWeight(double /*distance*/, + double /*mu*/) const { + throw std::logic_error("L2WithDeadZone loss has no graduated form."); +} + +double L2WithDeadZone::graduatedLoss(double /*distance*/, double /*mu*/) const { + throw std::logic_error("L2WithDeadZone loss has no graduated form."); +} + void L2WithDeadZone::print(const std::string &s="") const { std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl; } @@ -505,6 +634,16 @@ double AsymmetricTukey::loss(double distance) const { return csquared_ / 6.0; } +double AsymmetricTukey::graduatedWeight(double /*distance*/, + double /*mu*/) const { + throw std::logic_error("AsymmetricTukey loss has no graduated form."); +} + +double AsymmetricTukey::graduatedLoss(double /*distance*/, + double /*mu*/) const { + throw std::logic_error("AsymmetricTukey loss has no graduated form."); +} + void AsymmetricTukey::print(const std::string &s="") const { std::cout << s << ": AsymmetricTukey (" << c_ << ")" << std::endl; } @@ -549,6 +688,16 @@ double AsymmetricCauchy::loss(double distance) const { return ksquared_ * val * 0.5; } +double 
AsymmetricCauchy::graduatedWeight(double /*error*/, + double /*error*/) const { + throw std::logic_error("AsymmetricCauchy loss has no graduated form."); +} + +double AsymmetricCauchy::graduatedLoss(double /*error*/, + double /*error*/) const { + throw std::logic_error("AsymmetricCauchy loss has no graduated form."); +} + void AsymmetricCauchy::print(const std::string &s="") const { std::cout << s << ": AsymmetricCauchy (" << k_ << ")" << std::endl; } @@ -568,16 +717,36 @@ AsymmetricCauchy::shared_ptr AsymmetricCauchy::Create(double k, const ReweightSc // Custom /* ************************************************************************* */ -Custom::Custom(std::function weight, std::function loss, const ReweightScheme reweight, - std::string name) - : Base(reweight), weight_(std::move(weight)), loss_(loss), name_(std::move(name)) {} - -double Custom::weight(double distance) const { - return weight_(distance); +Custom::Custom(std::function weight, + std::function loss, + std::optional> grad_weight, + std::optional> grad_loss, + const ReweightScheme reweight, std::string name) + : Base(reweight), + weight_(std::move(weight)), + loss_(loss), + grad_weight_(grad_weight), + grad_loss_(grad_loss), + name_(std::move(name)) {} + +double Custom::weight(double distance) const { return weight_(distance); } + +double Custom::loss(double distance) const { return loss_(distance); } + +double Custom::graduatedWeight(double distance, double mu) const { + if (grad_weight_) { + return (*grad_weight_)(distance, mu); + } else { + throw std::logic_error("Custom loss provided no graduated form."); + } } -double Custom::loss(double distance) const { - return loss_(distance); +double Custom::graduatedLoss(double distance, double mu) const { + if (grad_loss_) { + return (*grad_loss_)(distance, mu); + } else { + throw std::logic_error("Custom loss provided no graduated form."); + } } void Custom::print(const std::string &s = "") const { @@ -592,9 +761,14 @@ bool Custom::equals(const Base 
&expected, double tol) const { loss_.target() == p->loss_.target() && reweight_ == p->reweight_; } -Custom::shared_ptr Custom::Create(std::function weight, std::function loss, - const ReweightScheme reweight, const std::string &name) { - return std::make_shared(std::move(weight), std::move(loss), reweight, name); +Custom::shared_ptr Custom::Create( + std::function weight, std::function loss, + std::optional> grad_weight, + std::optional> grad_loss, + const ReweightScheme reweight, const std::string& name) { + return std::make_shared(std::move(weight), std::move(loss), + std::move(grad_weight), std::move(grad_loss), + reweight, name); } } // namespace mEstimator diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index 3976466d48..b1890c5e42 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -54,6 +54,13 @@ namespace noiseModel { * and hence we can solve the equivalent weighted least squares problem \sum w(r_i) \rho(r_i) * * Each M-estimator in the mEstimator name space simply implements the above functions. + * + * Each M-estimator additionally implements "graduated" versions of these functions. + * Name Symbol + * Graduated Loss \phi(x,\mu) + * Graduated Weight \w(x,\mu) + * The control parameter \mu transitions the loss from convex to its original robust form. + * This is used by continuation-style algorithms (GNC, riSAM) to modify the underlying problem structure. */ // clang-format on namespace mEstimator { @@ -109,6 +116,22 @@ class GTSAM_EXPORT Base { */ virtual double weight(double distance) const = 0; + /** + * This method is responsible for returning the total penalty for a given + * amount of error and the current control parameter \f$\mu\f$. 
+ * + * This returns \f$\rho(x, \mu)\f$ in \ref mEstimator + */ + virtual double graduatedLoss(double distance, double mu) const = 0; + + /** + * This method is responsible for returning the weight for a given + * amount of error and the current control parameter \f$\mu\f$. + * + * This returns \f$w(x, \mu)\f$ in \ref mEstimator + */ + virtual double graduatedWeight(double distance, double mu) const = 0; + virtual void print(const std::string &s) const = 0; virtual bool equals(const Base &expected, double tol = 1e-8) const = 0; @@ -157,6 +180,8 @@ class GTSAM_EXPORT Null : public Base { ~Null() override {} double weight(double /*error*/) const override { return 1.0; } double loss(double distance) const override { return 0.5 * distance * distance; } + double graduatedWeight(double distance, double /*error*/) const override { return weight(distance); } + double graduatedLoss(double distance, double /*error*/) const override { return loss(distance); } void print(const std::string &s) const override; bool equals(const Base & /*expected*/, double /*tol*/) const override { return true; } static shared_ptr Create(); @@ -179,6 +204,8 @@ class GTSAM_EXPORT Null : public Base { * - Loss \rho(x) = c² (|x|/c - log(1+|x|/c)) * - Derivative \phi(x) = x/(1+|x|/c) * - Weight w(x) = \phi(x)/x = 1/(1+|x|/c) + * + * Fair loss has no graduated form. 
*/ class GTSAM_EXPORT Fair : public Base { protected: @@ -190,6 +217,8 @@ class GTSAM_EXPORT Fair : public Base { Fair(double c = 1.3998, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double /*error*/, double /*error*/) const override; + double graduatedLoss(double /*error*/, double /*error*/) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double c, const ReweightScheme reweight = Block); @@ -214,6 +243,9 @@ class GTSAM_EXPORT Fair : public Base { * - Loss \rho(x) = 0.5 x² if |x| Robust: 1 + * - Loss, Derivative, and Weight computed by scaling k by control parameter mu */ class GTSAM_EXPORT Huber : public Base { protected: @@ -225,11 +257,18 @@ class GTSAM_EXPORT Huber : public Base { Huber(double k = 1.345, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); double modelParameter() const { return k_; } + /// @brief Static implementation of Huber Weight + static double Weight(double k, double distance); + /// @brief Static implementation of Huber Loss + static double Loss(double k, double distance); + private: #if GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ @@ -254,6 +293,9 @@ class GTSAM_EXPORT Huber : public Base { * - Loss \rho(x) = 0.5 k² log(1+x²/k²) * - Derivative \phi(x) = (k²x)/(x²+k²) * - Weight w(x) = \phi(x)/x = k²/(x²+k²) + * + * Cauchy loss is graduated from Convex: \infty -> Robust: 1 + * - Loss, Derivative, and 
Weight computed by scaling k by control parameter mu */ class GTSAM_EXPORT Cauchy : public Base { protected: @@ -265,11 +307,18 @@ class GTSAM_EXPORT Cauchy : public Base { Cauchy(double k = 0.1, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); double modelParameter() const { return k_; } + /// @brief Static implementation of Cauchy Weight + static double Weight(double ksquared, double distance); + /// @brief Static implementation of Cauchy Loss + static double Loss(double ksquared, double distance); + private: #if GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ @@ -290,6 +339,9 @@ class GTSAM_EXPORT Cauchy : public Base { * - Loss \f$ \rho(x) = c² (1 - (1-x²/c²)³)/6 \f$ if |x| Robust: 1 + * - Loss, Derivative, and Weight computed by scaling c by control parameter mu */ class GTSAM_EXPORT Tukey : public Base { protected: @@ -301,11 +353,19 @@ class GTSAM_EXPORT Tukey : public Base { Tukey(double c = 4.6851, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); double modelParameter() const { return c_; } + /// @brief Static implementation of Tukey Weight + static double Weight(double c, double csquared, double distance); + /// @brief Static 
implementation of Tukey Loss + static double Loss(double c, double csquared, double distance); + + private: #if GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ @@ -325,6 +385,9 @@ class GTSAM_EXPORT Tukey : public Base { * - Loss \f$ \rho(x) = -0.5 c² (exp(-x²/c²) - 1) \f$ * - Derivative \f$ \phi(x) = x exp(-x²/c²) \f$ * - Weight \f$ w(x) = \phi(x)/x = exp(-x²/c²) \f$ + * + * Welsch loss is graduated from Convex: \infty -> Robust: 1 + * - Loss, Derivative, and Weight computed by scaling c by control parameter mu */ class GTSAM_EXPORT Welsch : public Base { protected: @@ -336,11 +399,18 @@ class GTSAM_EXPORT Welsch : public Base { Welsch(double c = 2.9846, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); double modelParameter() const { return c_; } + /// @brief Static implementation of Welsch Weight + static double Weight(double csquared, double distance); + /// @brief Static implementation of Welsch Loss + static double Loss(double csquared, double distance); + private: #if GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ @@ -363,36 +433,46 @@ class GTSAM_EXPORT Welsch : public Base { * - Loss \rho(x) = 0.5 (c²x²)/(c²+x²) * - Derivative \phi(x) = xc⁴/(c²+x²)² * - Weight w(x) = \phi(x)/x = c⁴/(c²+x²)² + * + * Geman-McClure loss has two graduated forms + * + * STANDARD [1] is graduated from Convex: \infty -> Robust: 1 + * - Loss, Derivative, and Weight computed by scaling c² by control parameter mu + * + * SCALE_INVARIANT [2] is graduated from Convex: 0.0 -> Robust: 1.0 + * - Loss \rho(x) = 0.5 (c²x²)/(c²+(x²)^\mu) + * - Derivative 
\phi(x) = x(c²(c²+(x²)^\mu * (1-\mu)))/(c²+(x²)^\mu)² + * - Weight w(x) = \phi(x)/x = (c²(c²+(x²)^\mu * (1-\mu)))/(c²+(x²)^\mu)² */ class GTSAM_EXPORT GemanMcClure : public Base { public: typedef std::shared_ptr shared_ptr; + enum GradScheme { STANDARD, SCALE_INVARIANT }; - GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block); + GemanMcClure(double c = 1.0, + const GradScheme graduation = GradScheme::STANDARD, + const ReweightScheme reweight = Block); ~GemanMcClure() override {} double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; - static shared_ptr Create(double k, const ReweightScheme reweight = Block); + static shared_ptr Create(double k, + const GradScheme graduation = GradScheme::STANDARD, + const ReweightScheme reweight = Block); double modelParameter() const { return c_; } - /** @brief A static helper function to compute the Geman-McClure robust weight. - * The static function takes the squared value of the residual and the scale parameter. - * The weight member function now calls this function. While the member function takes the residual as input, - * it passes x² and c² to the static helper. - * - * w(x², c²) = \phi(x)/x = c⁴/(c²+x²)² - * - * - * @param distance2 Squared residual magnitude. - * @param c2 Squared scale parameter. 
- * @return Weight w(x) in (0, 1] - */ - static double Weight(double distance2, double c2); + + /// @brief Static implementation of GemanMcClure Weight + static double Weight(double csquared, double distance); + /// @brief Static implementation of GemanMcClure Loss + static double Loss(double csquared, double distance); protected: double c_; double csquared_; + GradScheme graduation_; private: #if GTSAM_ENABLE_BOOST_SERIALIZATION @@ -414,6 +494,9 @@ class GTSAM_EXPORT GemanMcClure : public Base { * - Loss \rho(x) = 0.5 x^2 if |x|<=c, 0.5 c^2 otherwise * - Derivative \phi(x) = x if |x|<=c, 0 otherwise * - Weight w(x) = \phi(x)/x = 1 if |x|<=c, 0 otherwise + * + * TLS loss is graduated from Convex: \infty -> Robust: 1 + * - Loss, Derivative, and Weight computed by scaling c by control parameter mu */ class GTSAM_EXPORT TruncatedLeastSquares : public Base { public: @@ -422,11 +505,19 @@ class GTSAM_EXPORT TruncatedLeastSquares : public Base { TruncatedLeastSquares(double c = 1.0, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double c, const ReweightScheme reweight = Block); double modelParameter() const { return c_; } - /** @brief A static helper function to compute the TLS robust weight. + + /// @brief Static implementation of TLS Weight + static double Weight(double c, double distance); + /// @brief Static implementation of TLS Loss + static double Loss(double c, double csquared, double distance); + + /** @brief A static helper function to compute the TLS GNC robust weight. * The static function takes the squared value of the residual, the squared lower bound, the squared upper bound. 
* This helper returns a optional because it is also used for GNC, and we encounter transition weight cases, * where the weight is not strictly binary (0 or 1) when the residual is within the transition region between inliers and outliers. @@ -438,7 +529,7 @@ class GTSAM_EXPORT TruncatedLeastSquares : public Base { * @param upperbound Squared upper bound. * @return Weight w(x) is {0, 1} or None if the residual is between lowerbound and upperbound. */ - static std::optional Weight(double distance2, double lowerbound, double upperbound); + static std::optional GNCWeight(double distance2, double lowerbound, double upperbound); protected: double c_; @@ -468,6 +559,9 @@ class GTSAM_EXPORT TruncatedLeastSquares : public Base { * - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x") * - Derivative \phi(x) = 2c²x/(x²+c)² * - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise + * + * DCS loss is graduated from Convex: \infty -> Robust: 1 + * - Loss, Derivative, and Weight computed by scaling c by control parameter mu */ class GTSAM_EXPORT DCS : public Base { public: @@ -477,11 +571,18 @@ class GTSAM_EXPORT DCS : public Base { ~DCS() override {} double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); double modelParameter() const { return c_; } + /// @brief Static implementation of DCS Weight + static double Weight(double c, double distance); + /// @brief Static implementation of DCS Loss + static double Loss(double c, double distance); + protected: double c_; @@ -509,6 +610,8 @@ class GTSAM_EXPORT DCS : public Base { * - Loss \f$ \rho(x) = 0 \f$ if |x|k, (k+x) if x<-k * - Weight \f$ w(x) = \phi(x)/x = 0 \f$ if 
|x|k, (k+x)/x if x<-k + * + * L2WithDeadZone loss has no graduated form. */ class GTSAM_EXPORT L2WithDeadZone : public Base { protected: @@ -520,6 +623,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base { L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block); double weight(double distance) const override; double loss(double distance) const override; + double graduatedWeight(double /*error*/, double /*error*/) const override; + double graduatedLoss(double /*error*/, double /*error*/) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -545,6 +650,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base { * - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|; using CustomWeightFunction = std::function; +using CustomGraduatedLossFunction = + std::optional>; +using CustomGraduatedWeightFunction = + std::optional>; /** Implementation of the "Custom" robust error model. * * This model just takes two functions as input, one for the loss and one for the weight. 
+ * + * Optionally this model can also define graduated loss functions */ class GTSAM_EXPORT Custom : public Base { protected: std::function weight_, loss_; + std::optional> grad_weight_, grad_loss_; std::string name_; public: typedef std::shared_ptr shared_ptr; Custom(CustomWeightFunction weight, CustomLossFunction loss, + CustomGraduatedWeightFunction grad_weight = std::nullopt, + CustomGraduatedLossFunction grad_loss = std::nullopt, const ReweightScheme reweight = Block, std::string name = "Custom"); double weight(double distance) const override; double loss(double distance) const override; - void print(const std::string &s) const override; - bool equals(const Base &expected, double tol = 1e-8) const override; - static shared_ptr Create(std::function weight, std::function loss, - const ReweightScheme reweight = Block, const std::string &name = "Custom"); + double graduatedWeight(double distance, double mu) const override; + double graduatedLoss(double distance, double mu) const override; + void print(const std::string& s) const override; + bool equals(const Base& expected, double tol = 1e-8) const override; + static shared_ptr Create( + std::function weight, std::function loss, + CustomGraduatedWeightFunction grad_weight = std::nullopt, + CustomGraduatedLossFunction grad_loss = std::nullopt, + const ReweightScheme reweight = Block, + const std::string& name = "Custom"); inline std::string& name() { return name_; } inline std::function& weightFunction() { return weight_; } diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h index 3f4f769f59..d30525ba50 100644 --- a/gtsam/nonlinear/GncOptimizer.h +++ b/gtsam/nonlinear/GncOptimizer.h @@ -515,7 +515,7 @@ class GncOptimizer { for (size_t k = 0; k < nfg_.size(); k++) { if (needsWeightUpdate(factorTypes_[k])) { double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual - weights[k] = noiseModel::mEstimator::GemanMcClure::Weight(u2_k, mu * barcSq_[k]); + weights[k] = 
noiseModel::mEstimator::GemanMcClure::Weight(mu * barcSq_[k], u2_k); } } return weights; @@ -528,7 +528,7 @@ class GncOptimizer { case GncScheduler::SuperLinear: { double lowerbound = barcSq_[k]; double upperbound = ((mu + 1.0) * (mu + 1.0) / (mu * mu)) * barcSq_[k]; - auto w = noiseModel::mEstimator::TruncatedLeastSquares::Weight(u2_k, lowerbound, upperbound); + auto w = noiseModel::mEstimator::TruncatedLeastSquares::GNCWeight(u2_k, lowerbound, upperbound); if (w) { weights[k] = *w; } @@ -541,7 +541,7 @@ class GncOptimizer { case GncScheduler::Linear: { // use eq (14) in GNC paper double upperbound = ((mu + 1.0) / mu) * barcSq_[k]; double lowerbound = (mu / (mu + 1.0)) * barcSq_[k]; - auto w = noiseModel::mEstimator::TruncatedLeastSquares::Weight(u2_k, lowerbound, upperbound); + auto w = noiseModel::mEstimator::TruncatedLeastSquares::GNCWeight(u2_k, lowerbound, upperbound); if (w) { weights[k] = *w; } From b9d8ffff42ba448dda4fbed897609598481c4a2c Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 29 Mar 2026 22:23:05 -0400 Subject: [PATCH 16/17] Refactor GradKernel into Robust Loss + Scheduler --- gtsam/sam/RISAM.cpp | 10 +- gtsam/sam/RISAMGraduatedFactor.cpp | 21 ++- gtsam/sam/RISAMGraduatedFactor.h | 50 ++++--- gtsam/sam/RISAMGraduationScheduler.cpp | 64 +++++++++ gtsam/sam/RISAMGraduationScheduler.h | 175 +++++++++++++++++++++++++ gtsam/sam/tests/testRISAM.cpp | 164 +++++++++++------------ 6 files changed, 360 insertions(+), 124 deletions(-) create mode 100644 gtsam/sam/RISAMGraduationScheduler.cpp create mode 100644 gtsam/sam/RISAMGraduationScheduler.h diff --git a/gtsam/sam/RISAM.cpp b/gtsam/sam/RISAM.cpp index c7dd528372..b79229127e 100644 --- a/gtsam/sam/RISAM.cpp +++ b/gtsam/sam/RISAM.cpp @@ -115,7 +115,7 @@ RISAM::UpdateResult RISAM::updateRobust( for (gtsam::FactorIndex fidx : remaining_convex_factors) { auto grad_factor = std::dynamic_pointer_cast(factors_.at(fidx)); - *(mu_[fidx]) = grad_factor->kernel()->updateMu( + *(mu_[fidx]) = 
grad_factor->scheduler()->updateMu( *(mu_[fidx]), grad_factor->residual(current_est), mu_update_count[fidx]); convexKeys.insert(convexKeys.end(), factors_.at(fidx)->begin(), @@ -139,7 +139,7 @@ RISAM::UpdateResult RISAM::updateRobust( for (gtsam::FactorIndex fidx : remaining_convex_factors) { auto grad_factor = std::dynamic_pointer_cast(factors_.at(fidx)); - if (!grad_factor->kernel()->isMuConverged(*(mu_[fidx]))) + if (!grad_factor->scheduler()->isMuConverged(*(mu_[fidx]))) new_remaining_convex_factors.push_back(fidx); } remaining_convex_factors = new_remaining_convex_factors; @@ -220,7 +220,7 @@ void RISAM::augmentMu(const gtsam::NonlinearFactorGraph& new_factors, std::shared_ptr mu, mu_init; if (grad_factor) { mu = grad_factor->mu_; - mu_init = std::make_shared(grad_factor->kernel()->muInit()); + mu_init = std::make_shared(grad_factor->scheduler()->muInit()); } else { mu = std::make_shared(0); mu_init = std::make_shared(0); @@ -260,10 +260,10 @@ void RISAM::incrementMuInits() { if (mahdist > mah_upper_bound) { *(mu_inits_[fidx]) = - grad_factor->kernel()->incrementMuInit(*(mu_inits_[fidx])); + grad_factor->scheduler()->updateMuInit(*(mu_inits_[fidx]), false); } else if (mahdist < mah_lower_bound) { *(mu_inits_[fidx]) = - grad_factor->kernel()->incrementMuInitInv(*(mu_inits_[fidx])); + grad_factor->scheduler()->updateMuInit(*(mu_inits_[fidx]), true); } } // Reset accumulator diff --git a/gtsam/sam/RISAMGraduatedFactor.cpp b/gtsam/sam/RISAMGraduatedFactor.cpp index 9496872835..061aa3512e 100644 --- a/gtsam/sam/RISAMGraduatedFactor.cpp +++ b/gtsam/sam/RISAMGraduatedFactor.cpp @@ -13,26 +13,25 @@ namespace gtsam { /* ************************************************************************* */ -GraduatedFactor::GraduatedFactor(GraduatedKernel::shared_ptr kernel) - : kernel_(kernel) { - mu_ = std::make_shared(kernel_->muInit()); +GraduatedFactor::GraduatedFactor(GraduatedFactor::RobustLoss::shared_ptr loss, + GraduationScheduler::shared_ptr scheduler) + : 
robust_loss_(loss), scheduler_(scheduler) { + mu_ = std::make_shared(scheduler_->muInit()); } /* ************************************************************************* */ GraduatedFactor::GraduatedFactor(const GraduatedFactor& other) - : kernel_(other.kernel_) { + : robust_loss_(other.robust_loss_), scheduler_(other.scheduler_) { mu_ = std::make_shared(*(other.mu_)); } /* ************************************************************************* */ -const GraduatedKernel::shared_ptr GraduatedFactor::kernel() const { - return kernel_; +const GraduatedFactor::RobustLoss::shared_ptr GraduatedFactor::loss() const { + return robust_loss_; } - /* ************************************************************************* */ -void GraduatedFactor::updateKernel( - const GraduatedKernel::shared_ptr& new_kernel) { - kernel_ = new_kernel; - *mu_ = new_kernel->muInit(); +const GraduationScheduler::shared_ptr GraduatedFactor::scheduler() const { + return scheduler_; } + } // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedFactor.h b/gtsam/sam/RISAMGraduatedFactor.h index 40a2edce49..ba410cb997 100644 --- a/gtsam/sam/RISAMGraduatedFactor.h +++ b/gtsam/sam/RISAMGraduatedFactor.h @@ -18,7 +18,7 @@ */ #pragma once #include -#include +#include namespace gtsam { @@ -28,13 +28,17 @@ class GraduatedFactor { /// @{ public: typedef std::shared_ptr shared_ptr; + typedef noiseModel::mEstimator::Base RobustLoss; /// @} /// @name Fields /// @{ protected: - /// @brief The Graduated Robust Kernel for this factor - GraduatedKernel::shared_ptr kernel_; + /// @brief The robust loss for this factor + RobustLoss::shared_ptr robust_loss_; + /// @brief The control param ($\mu$) scheduler for this factor + GraduationScheduler::shared_ptr scheduler_; + /// @brief The unique mu control parameter for this factor std::shared_ptr mu_; /// @} @@ -46,10 +50,12 @@ class GraduatedFactor { /// @{ public: /** @brief Constructor - * @param kernel: The graduated kernel to apply to 
this factor * @param args: The arguments required to construct the FACTOR_TYPE + * @param loss: The graduated robust loss applied to this factor + * @param scheduler: The control param $\mu$ scheduler for this factor */ - GraduatedFactor(GraduatedKernel::shared_ptr kernel); + GraduatedFactor(RobustLoss::shared_ptr loss, + GraduationScheduler::shared_ptr scheduler); /// @brief Copy constructor GraduatedFactor(const GraduatedFactor& other); @@ -68,15 +74,14 @@ class GraduatedFactor { virtual double robustResidual( const gtsam::Values& current_estimate) const = 0; - /// @brief Returns the kernel for this graduated factor - const GraduatedKernel::shared_ptr kernel() const; + /// @brief Returns the robust loss for this graduated factor + const RobustLoss::shared_ptr loss() const; - /// @brief Updates the Kernel of this Graduated Factor - /// WARN: Resets mu_ to the mu_init of the kernel - void updateKernel(const GraduatedKernel::shared_ptr& new_kernel); + /// @brief Returns the graduation scheduler for this factor + const GraduationScheduler::shared_ptr scheduler() const; /// @brief Copies this factor as an instance of its base type without the - /// graduated kernel + /// graduated robust loss or scheduler virtual gtsam::NonlinearFactor::shared_ptr cloneUngraduated() const = 0; /// @} }; @@ -98,16 +103,21 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /// @{ public: /** @brief Constructor - * @param kernel: The graduated kernel to use for this factor * @param args: The arguments required to construct the FACTOR_TYPE + * @param loss: The graduated robust loss applied to this factor + * @param scheduler: The control param $\mu$ scheduler for this factor */ template - GenericGraduatedFactor(GraduatedKernel::shared_ptr kernel, Args&&... args) - : FACTOR_TYPE(std::forward(args)...), GraduatedFactor(kernel) {} + GenericGraduatedFactor(RobustLoss::shared_ptr loss, + GraduationScheduler::shared_ptr scheduler, + Args&&... 
args) + : FACTOR_TYPE(std::forward(args)...), + GraduatedFactor(loss, scheduler) {} /// @brief Copy Constructor GenericGraduatedFactor(const GenericGraduatedFactor& other) - : FACTOR_TYPE(other), GraduatedFactor(other.kernel_) {} + : FACTOR_TYPE(other), + GraduatedFactor(other.robust_loss_, other.scheduler_) {} /// @brief Makes a deep copy of the factor gtsam::NonlinearFactor::shared_ptr clone() const override { @@ -119,7 +129,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /// @brief Linearizes the factor using the current value of mu gtsam::GaussianFactor::shared_ptr linearizeGraduated( const gtsam::Values& current_estimate) const override { - double r = residual(current_estimate); + double residual = this->residual(current_estimate); // Use base factor to linearize gtsam::Matrix A; @@ -140,7 +150,11 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { } // Weight the Linearized Blocks - kernel_->weightSystem(Ablocks, b, r, *mu_); + double sqrt_weight = sqrt(robust_loss_->graduatedWeight(residual, *mu_)); + for (gtsam::Matrix& Aj : Ablocks) { + Aj *= sqrt_weight; + } + b *= sqrt_weight; // Construct a jacobian factor from the weighted system gtsam::FastMap Ablock_map; @@ -173,7 +187,7 @@ class GenericGraduatedFactor : public FACTOR_TYPE, public GraduatedFactor { /// @brief See GraduatedFactor::robustResidual double robustResidual(const gtsam::Values& current_estimate) const override { - return kernel_->error(residual(current_estimate), *mu_); + return robust_loss_->graduatedLoss(residual(current_estimate), *mu_); } /// @brief See GraduatedFactor::cloneUngraduated diff --git a/gtsam/sam/RISAMGraduationScheduler.cpp b/gtsam/sam/RISAMGraduationScheduler.cpp new file mode 100644 index 0000000000..d83bd15239 --- /dev/null +++ b/gtsam/sam/RISAMGraduationScheduler.cpp @@ -0,0 +1,64 @@ + +#include + +#include + +namespace gtsam { + +/* ************************************************************************* */ 
+double SIGScheduler::updateMu(const double& mu, const double& residual, + const size_t& update_count) const { + return this->mu_update_strat_(mu, residual, update_count); +} + +/* ************************************************************************* */ +double SIGScheduler::updateMuInit(const double& mu, + const bool is_inlier) const { + double new_mu = mu + (is_inlier ? -mu_init_increment_ : mu_init_increment_); + return std::clamp(new_mu, mu_init_, convergence_thresh_); +} + +/* ************************************************************************* */ +bool SIGScheduler::isMuConverged(const double& mu) const { + return mu >= convergence_thresh_; +} + +/* ************************************************************************* */ +double SIGScheduler::muUpdateMcGann2023(const double& mu, + const double& residual, + const size_t& update_count) { + return std::min(1.0, mu + (mu + 0.1) * 1.2); +} + +/* ************************************************************************* */ +double SIGScheduler::muUpdateStable(const double& mu, const double& residual, + const size_t& update_count) { + // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + + // 0.4 if (t==1), else mu[t] + 0.05 + // Authors attempt to build the most stable method possible from years of + // using the algorithm. + if (update_count == 0) return std::min(1.0, mu + 0.5); + if (update_count == 1) return std::min(1.0, mu + 0.4); + return std::min(1.0, mu + 0.05); +} + +/* ************************************************************************* */ +double ScaledScheduler::updateMu(const double& mu, const double& residual, + const size_t& update_count) const { + return std::clamp(mu_scale_ * mu, convergence_thresh_, mu_init_); +} + +/* ************************************************************************* */ +double ScaledScheduler::updateMuInit(const double& mu_init, + const bool is_inlier) const { + double new_mu_init = + mu_init * (is_inlier ? 
1.0 / mu_init_scale_ : mu_init_scale_); + return std::clamp(new_mu_init, convergence_thresh_, mu_init_); +} + +/* ************************************************************************* */ +bool ScaledScheduler::isMuConverged(const double& mu) const { + return mu <= convergence_thresh_; +} + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduationScheduler.h b/gtsam/sam/RISAMGraduationScheduler.h new file mode 100644 index 0000000000..e1733e856b --- /dev/null +++ b/gtsam/sam/RISAMGraduationScheduler.h @@ -0,0 +1,175 @@ +/** @brief Interface and implementations for GraduationScheduler + * These are used to define the sequence of problems solved by RISAM. + * The scheduler defines these problems using the the control parameter $\mu$ + * for a specific Graduated Robust Loss Function. + * @author Dan McGann + * @date Mar 2022 + */ +#pragma once +#include +#include +#include + +#include +#include + +namespace gtsam { + +/** @brief Base class for graduation scheduling for riSAM + * Advanced users can write their own schedulers by inheriting from this class + */ +class GraduationScheduler { + /// @name Types + /// @{ + public: + typedef std::shared_ptr shared_ptr; + /// @} + + /// @name Fields + /// @{ + protected: + /// @brief The initial value for mu $\mu_{init}$ + const double mu_init_; + /// @brief The threshold at which to consider mu to be converged + const double convergence_thresh_; + /// @} + + /// @name Public Interface + /// @{ + public: + GraduationScheduler(double mu_init, double convergence_thresh) + : mu_init_(mu_init), convergence_thresh_(convergence_thresh) {} + + /// @brief Returns the value of $\mu_{init}$ for this graduated + double muInit() const { return mu_init_; } + + /** @brief Returns the next value of $\mu$ for this graduated + * @param mu: The current value of $\mu$ + * @param residual: The current residual of the factor + * @param update_count: The number of mu updates during this solve + */ + virtual double 
updateMu(const double& mu, const double& residual,
+                          const size_t& update_count) const = 0;
+
+  /** @brief Returns the next value of $\mu_{init}$ for strong inliers/outliers
+   * - Inliers are updated to have more convex $\mu_{init}$
+   * - Outliers are updated to have less convex $\mu_{init}$
+   * @param mu_init: The current value of $\mu_{init}$
+   * @param is_inlier: Flag indicating inlier update otherwise an outlier update
+   */
+  virtual double updateMuInit(const double& mu_init,
+                              const bool is_inlier) const = 0;
+
+  /// @brief Returns true iff the value of $\mu$ has converged
+  virtual bool isMuConverged(const double& mu) const = 0;
+  /// @}
+};
+
+/* ************************************************************************* */
+/** @brief Scheduler for Geman McClure Loss with Scale Invariant Graduation
+ */
+class SIGScheduler : public GraduationScheduler {
+  /// @name Types
+  /// @{
+ public:
+  /// @brief Shortcut for shared pointer
+  typedef std::shared_ptr shared_ptr;
+  /// @brief Function type for $\mu$ update sequence
+  typedef std::function MuUpdateStrategy;
+  /// @}
+
+  /// @name Fields
+  /// @{
+ private:
+  /// @brief The update strategy for the sequence $\mu$ values
+  MuUpdateStrategy mu_update_strat_;
+  /// @brief The amount to increment/decrement $\mu_{init}$ if the factor is a
+  /// strong inlier/outlier when values converge
+  double mu_init_increment_;
+  /// @}
+
+  /// @name Public Interface
+  /// @{
+ public:
+  /** @brief Individual Parameter Constructor
+   * @param mu_update_strat: The update strategy to use for $\mu$ updates
+   * Recommend: muUpdateStable, Alt: muUpdateMcGann2023 or Custom
+   * @param mu_init_increment: The amount to increment/decrement $\mu_{init}$
+   */
+  SIGScheduler(MuUpdateStrategy mu_update_strat = muUpdateStable,
+               double mu_init_increment = 0.2)
+      : GraduationScheduler(0.0, 1.0),
+        mu_update_strat_(mu_update_strat),
+        mu_init_increment_(mu_init_increment) {}
+
+  /// @brief @see GraduationScheduler
+  double updateMu(const 
double& mu, const double& residual,
+                  const size_t& update_count) const override;
+  /// @brief @see GraduationScheduler
+  double updateMuInit(const double& mu_init,
+                      const bool is_inlier) const override;
+  /// @brief @see GraduationScheduler
+  bool isMuConverged(const double& mu) const override;
+  /// @}
+
+  /// @name Default $\mu$ Update Strategies
+  /// @{
+ public:
+  /// @brief $\mu$ update sequence empirically discovered and presented in the
+  /// orig riSAM paper
+  static double muUpdateMcGann2023(const double& mu, const double& residual,
+                                   const size_t& update_count);
+  /// @brief More stable $\mu$ Update sequence developed empirically since
+  /// algorithm was published
+  static double muUpdateStable(const double& mu, const double& residual,
+                               const size_t& update_count);
+  /// @}
+};
+
+/* ************************************************************************* */
+/** @brief Scheduler for losses that use scaled shape parameters for graduation.
+ * Examples include: Cauchy, TLS, Tukey
+ * These losses are Convex at \mu=\infty and Robust at \mu=1.0
+ */
+class ScaledScheduler : public GraduationScheduler {
+  /// @name Types
+  /// @{
+ public:
+  /// @brief Shortcut for shared pointer
+  typedef std::shared_ptr shared_ptr;
+  /// @}
+
+  /// @name Fields
+  /// @{
+ private:
+  /// @brief The scale factor to update $\mu$ values
+  double mu_scale_;
+  /// @brief The scale factor to update $\mu_{init}$ values
+  double mu_init_scale_;
+  /// @}
+
+  /// @name Public Interface
+  /// @{
+ public:
+  /** @brief Individual Parameter Constructor
+   * @param mu_init: The initial value for mu
+   * @param mu_scale: The scale factor for $\mu$ updates
+   * @param mu_init_scale: The scale factor for $\mu_{init}$ updates
+   */
+  ScaledScheduler(double mu_init = 1e4, double mu_scale = 0.715,
+                  double mu_init_scale = 0.9)
+      : GraduationScheduler(mu_init, 1.0),
+        mu_scale_(mu_scale),
+        mu_init_scale_(mu_init_scale) {}
+
+  /// @brief @see GraduationScheduler
+  double updateMu(const 
double& mu, const double& residual, + const size_t& update_count) const override; + /// @brief @see GraduationScheduler + double updateMuInit(const double& mu_init, + const bool is_inlier) const override; + /// @brief @see GraduationScheduler + bool isMuConverged(const double& mu) const override; + /// @} +}; +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/tests/testRISAM.cpp b/gtsam/sam/tests/testRISAM.cpp index fc330ed534..258d9bff99 100644 --- a/gtsam/sam/tests/testRISAM.cpp +++ b/gtsam/sam/tests/testRISAM.cpp @@ -29,112 +29,87 @@ using namespace std; using namespace gtsam; +typedef noiseModel::mEstimator::GemanMcClure GMLoss; /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, Error) { - SIGKernel kernel(1.0); - // At zero error is not dependent on mu - CHECK(assert_equal(0.0, kernel.error(0.0, 0.0), 1e-9)); - CHECK(assert_equal(0.0, kernel.error(0.0, 0.5), 1e-9)); - CHECK(assert_equal(0.0, kernel.error(0.0, 1.0), 1e-9)); - - // For Mu = 0.0 error is quadratic - CHECK(assert_equal(0.0025, kernel.error(0.1, 0.0), 1e-9)); - CHECK(assert_equal(0.4225, kernel.error(1.3, 0.0), 1e-9)); - CHECK(assert_equal(38.750625, kernel.error(12.45, 0.0), 1e-9)); - - // For 0.0 < Mu Error depends on shape - CHECK(assert_equal(0.00454545454, kernel.error(0.1, 0.5), 1e-9)); - CHECK(assert_equal(0.36739130434, kernel.error(1.3, 0.5), 1e-9)); - CHECK(assert_equal(5.76217472119, kernel.error(12.45, 0.5), 1e-9)); - - // For Mu == 1.0 error depends is Geman-Mcclure - CHECK(assert_equal(0.00495049504, kernel.error(0.1, 1.0), 1e-9)); - CHECK(assert_equal(0.31412639405, kernel.error(1.3, 1.0), 1e-9)); - CHECK(assert_equal(0.49679492315, kernel.error(12.45, 1.0), 1e-9)); +TEST(SIGScheduler, UpdateMuInit) { + SIGScheduler scheduler(SIGScheduler::muUpdateStable, 0.1); + CHECK(assert_equal(0.1, scheduler.updateMuInit(0.0, false), 1e-9)); + CHECK(assert_equal(1.0, scheduler.updateMuInit(1.0, false), 1e-9)); + + 
CHECK(assert_equal(0.0, scheduler.updateMuInit(0.0, true), 1e-9)); + CHECK(assert_equal(0.9, scheduler.updateMuInit(1.0, true), 1e-9)); } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, Weight) { - SIGKernel kernel(1.0); - // At Mu = 0 weights are identical at 0.5 - CHECK(assert_equal(0.5, kernel.weight(0.1, 0.0), 1e-9)); - CHECK(assert_equal(0.5, kernel.weight(1.3, 0.0), 1e-9)); - CHECK(assert_equal(0.5, kernel.weight(12.45, 0.0), 1e-9)); - - // At Mu = 1 weights are higher for low error - CHECK(assert_equal(0.9802960494, kernel.weight(0.1, 1.0), 1e-9)); - CHECK(assert_equal(0.13819598955, kernel.weight(1.3, 1.0), 1e-9)); - CHECK(assert_equal(0.00004109007, kernel.weight(12.45, 1.0), 1e-9)); - - // At Mu = 1 large residuals have ~0 weight - CHECK(assert_equal(0.0, kernel.weight(2000.0, 1.0), 1e-9)); - - // Across Mu residual of 0 will have weight = 1 - CHECK(assert_equal(1.0, kernel.weight(0.0, 0.1), 1e-9)); - CHECK(assert_equal(1.0, kernel.weight(0.0, 0.5), 1e-9)); - CHECK(assert_equal(1.0, kernel.weight(0.0, 0.8), 1e-9)); +TEST(SIGScheduler, IsMuConverged) { + SIGScheduler scheduler(SIGScheduler::muUpdateStable, 0.1); + CHECK(!scheduler.isMuConverged(0.5)); + CHECK(scheduler.isMuConverged(1.0)); } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, WeightSystem) { - SIGKernel kernel(1.0); - vector A; - A.push_back(Matrix::Identity(3, 3)); - Vector b = Vector::Ones(3); - - kernel.weightSystem(A, b, 0.1, 0.0); - CHECK(assert_equal(Matrix::Identity(3, 3) * sqrt(0.5), A[0], 1e-9)); - CHECK(assert_equal(Vector::Ones(3) * sqrt(0.5), b, 1e-9)); +TEST(SIGScheduler, MuUpdateMcGann2023) { + // Standard Sequence + CHECK(assert_equal(0.12, SIGScheduler::muUpdateMcGann2023(0.0, 0, 0), 1e-9)); + CHECK( + assert_equal(0.384, SIGScheduler::muUpdateMcGann2023(0.12, 0, 1), 1e-9)); + CHECK(assert_equal(0.9648, SIGScheduler::muUpdateMcGann2023(0.384, 0, 2), + 1e-9)); 
+ CHECK( + assert_equal(1.0, SIGScheduler::muUpdateMcGann2023(0.9648, 0, 3), 1e-9)); } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, MuInitInc) { - SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); - CHECK(assert_equal(0.1, kernel.incrementMuInit(0.0), 1e-9)); - CHECK(assert_equal(1.0, kernel.incrementMuInit(1.0), 1e-9)); +TEST(SIGScheduler, MuUpdateStable) { + // Standard Sequence + CHECK(assert_equal(0.5, SIGScheduler::muUpdateStable(0.0, 0.0, 0), 1e-9)); + CHECK(assert_equal(0.9, SIGScheduler::muUpdateStable(0.5, 0.0, 1), 1e-9)); + CHECK(assert_equal(0.95, SIGScheduler::muUpdateStable(0.9, 0.0, 2), 1e-9)); + CHECK(assert_equal(1.0, SIGScheduler::muUpdateStable(0.95, 0.0, 3), 1e-9)); - CHECK(assert_equal(0.0, kernel.incrementMuInitInv(0.0), 1e-9)); - CHECK(assert_equal(0.9, kernel.incrementMuInitInv(1.0), 1e-9)); -} + // Sequence with Non-Zero mu_init + CHECK(assert_equal(0.7, SIGScheduler::muUpdateStable(0.2, 0.0, 0), 1e-9)); + CHECK(assert_equal(1.0, SIGScheduler::muUpdateStable(0.7, 0.0, 1), 1e-9)); -/* ************************************************************************* */ -TEST(RISAMGraduatedKernel, IsMuConverged) { - SIGKernel kernel(1.0, SIGKernel::muUpdateStable, 0.1); - CHECK(!kernel.isMuConverged(0.5)); - CHECK(kernel.isMuConverged(1.0)); + // Sequence with Large Non-Zero mu_init + CHECK(assert_equal(1.0, SIGScheduler::muUpdateStable(0.7, 0.0, 0), 1e-9)); } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, MuUpdateMcGann2023) { +TEST(ScaledScheduler, UpdateMu) { + ScaledScheduler scheduler(1e4, 0.715, 0.9); // Standard Sequence - CHECK(assert_equal(0.12, SIGKernel::muUpdateMcGann2023(0.0, 0, 0), 1e-9)); - CHECK(assert_equal(0.384, SIGKernel::muUpdateMcGann2023(0.12, 0, 1), 1e-9)); - CHECK(assert_equal(0.9648, SIGKernel::muUpdateMcGann2023(0.384, 0, 2), 1e-9)); - CHECK(assert_equal(1.0, SIGKernel::muUpdateMcGann2023(0.9648, 0, 
3), 1e-9)); + CHECK(assert_equal(71.5, scheduler.updateMu(100.0, 0.0, 0), 1e-9)); + CHECK(assert_equal(1.0, scheduler.updateMu(1.0, 0.0, 0), 1e-9)); } /* ************************************************************************* */ -TEST(RISAMGraduatedKernel, MuUpdateStable) { - // Standard Sequence - CHECK(assert_equal(0.5, SIGKernel::muUpdateStable(0.0, 0.0, 0), 1e-9)); - CHECK(assert_equal(0.9, SIGKernel::muUpdateStable(0.5, 0.0, 1), 1e-9)); - CHECK(assert_equal(0.95, SIGKernel::muUpdateStable(0.9, 0.0, 2), 1e-9)); - CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.95, 0.0, 3), 1e-9)); +TEST(ScaledScheduler, UpdateMuInit) { + ScaledScheduler scheduler(1e4, 0.715, 0.9); + CHECK(assert_equal(90.0, scheduler.updateMuInit(100, false), 1e-9)); + CHECK(assert_equal(1.0, scheduler.updateMuInit(1.0, false), 1e-9)); - // Sequence with Non-Zero mu_init - CHECK(assert_equal(0.7, SIGKernel::muUpdateStable(0.2, 0.0, 0), 1e-9)); - CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.7, 0.0, 1), 1e-9)); + CHECK(assert_equal(1e4, scheduler.updateMuInit(1e4, true), 1e-9)); + CHECK(assert_equal(111.111, scheduler.updateMuInit(100, true), 1e-3)); +} - // Sequence with Large Non-Zero mu_init - CHECK(assert_equal(1.0, SIGKernel::muUpdateStable(0.7, 0.0, 0), 1e-9)); +/* ************************************************************************* */ +TEST(ScaledScheduler, IsMuConverged) { + ScaledScheduler scheduler(1e4, 0.715, 0.9); + CHECK(!scheduler.isMuConverged(1e5)); + CHECK(!scheduler.isMuConverged(1.2)); + CHECK(scheduler.isMuConverged(1.0)); } /* ************************************************************************* */ TEST(RISAMGraduatedFactor, Linearize) { - SIGKernel::shared_ptr kernel = std::make_shared(1.0); + GMLoss::shared_ptr robust_loss = + GMLoss::Create(1.0, GMLoss::GradScheme::SCALE_INVARIANT); + SIGScheduler::shared_ptr scheduler = std::make_shared(); + NonlinearFactor::shared_ptr factor = - RISAM::make_graduated>(kernel, 0, 0.0, + 
RISAM::make_graduated>(robust_loss, scheduler, 0, 0.0, noiseModel::Unit::Create(1)); gtsam::Values values; @@ -148,9 +123,12 @@ TEST(RISAMGraduatedFactor, Linearize) { /* ************************************************************************* */ TEST(RISAMGraduatedFactor, Error) { - SIGKernel::shared_ptr kernel = std::make_shared(1.0); + GMLoss::shared_ptr robust_loss = + GMLoss::Create(1.0, GMLoss::GradScheme::SCALE_INVARIANT); + SIGScheduler::shared_ptr scheduler = std::make_shared(); + NonlinearFactor::shared_ptr factor = - RISAM::make_graduated>(kernel, 0, 0.0, + RISAM::make_graduated>(robust_loss, scheduler, 0, 0.0, noiseModel::Unit::Create(1)); gtsam::Values values; values.insert(0, 1.0); @@ -170,8 +148,11 @@ TEST(RISAM, RISAMIntegrationTest) { noiseModel::Diagonal::Sigmas((Vector(2) << M_PI / 100.0, 0.1).finished()); // Setup the Solver - SIGKernel::shared_ptr kernel = std::make_shared( - SIGKernel::shapeParamFromInfThresh(0.1, 3, 0.95)); + GMLoss::shared_ptr robust_loss = + GMLoss::Create(GMLoss::shapeParamFromInfThresh(0.1, 3, 0.95), + GMLoss::GradScheme::SCALE_INVARIANT); + SIGScheduler::shared_ptr scheduler = std::make_shared(); + RISAM::Parameters params; params.isam2_params = ISAM2Params( ISAM2DoglegLineSearchParams(0.02, 1.0, 1.5, 1e-3, false, 1e-4)); @@ -218,10 +199,12 @@ TEST(RISAM, RISAMIntegrationTest) { i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); newfactors.push_back( RISAM::make_graduated>( - kernel, i, 100, Rot2::fromAngle(M_PI / 2.0), 5.0, brNoise)); + robust_loss, scheduler, i, 100, Rot2::fromAngle(M_PI / 2.0), 5.0, + brNoise)); newfactors.push_back( RISAM::make_graduated>( - kernel, i, 101, Rot2::fromAngle(-M_PI / 2.0), 5.0, brNoise)); + robust_loss, scheduler, i, 101, Rot2::fromAngle(-M_PI / 2.0), 5.0, + brNoise)); fullgraph.push_back(newfactors); Values init; @@ -257,18 +240,19 @@ TEST(RISAM, RISAMIntegrationTest) { i, i + 1, Pose2(1.0, 0.0, 0.0), odoNoise); newfactors.push_back( RISAM::make_graduated>( - kernel, i, 100, 
Rot2::fromAngle((3 * M_PI) / 4.0), 7.07106, - brNoise)); + robust_loss, scheduler, i, 100, Rot2::fromAngle((3 * M_PI) / 4.0), + 7.07106, brNoise)); newfactors.push_back( RISAM::make_graduated>( - kernel, i, 101, Rot2::fromAngle(-(3 * M_PI) / 4.0), 7.07106, - brNoise)); + robust_loss, scheduler, i, 101, Rot2::fromAngle(-(3 * M_PI) / 4.0), + 7.07106, brNoise)); fullgraph.push_back(newfactors); // Add an Outlier Measurement [is not added to fullGraph] newfactors.push_back( RISAM::make_graduated>( - kernel, i, 100, Rot2::fromAngle(0.4), 300, brNoise)); + robust_loss, scheduler, i, 100, Rot2::fromAngle(0.4), 300, + brNoise)); Values init; init.insert((i + 1), Pose2(double(i + 1) + 0.1, 0.1, 0.01)); From 4630c199088cf41f2867cb7ef15ef3d1c02ac3c7 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 29 Mar 2026 22:23:32 -0400 Subject: [PATCH 17/17] Remove Graduated Kernel + Add Tests --- gtsam/linear/LossFunctions.cpp | 16 +- gtsam/linear/LossFunctions.h | 14 ++ gtsam/linear/tests/testNoiseModel.cpp | 122 ++++++++++++++- gtsam/sam/RISAMGraduatedKernel.cpp | 80 ---------- gtsam/sam/RISAMGraduatedKernel.h | 206 -------------------------- 5 files changed, 148 insertions(+), 290 deletions(-) delete mode 100644 gtsam/sam/RISAMGraduatedKernel.cpp delete mode 100644 gtsam/sam/RISAMGraduatedKernel.h diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp index f5b8867393..e409dcf5db 100644 --- a/gtsam/linear/LossFunctions.cpp +++ b/gtsam/linear/LossFunctions.cpp @@ -17,6 +17,7 @@ */ #include +#include #include #include @@ -437,6 +438,19 @@ GemanMcClure::shared_ptr GemanMcClure::Create( return shared_ptr(new GemanMcClure(c, graduation, reweight)); } +/* ************************************************************************* */ +double GemanMcClure::shapeParamFromInfThresh(double influence_thresh, size_t dof, + double chi2_outlier_thresh) { + double outlier_residual_thresh = + internal::chiSquaredQuantile(dof, chi2_outlier_thresh); + // Equation [d/dr \rho(x) = 
influence_thresh] solved for c
+  const double t1 =
+      std::sqrt(2 * influence_thresh * std::pow(outlier_residual_thresh, 5));
+  const double t2 = influence_thresh * std::pow(outlier_residual_thresh, 2);
+  const double t3 = influence_thresh - 2 * outlier_residual_thresh;
+  return std::sqrt(-((t1 + t2) / t3));
+}
+
 /* ************************************************************************* */
 // TruncatedLeastSquares
 /* ************************************************************************* */
@@ -484,7 +498,7 @@ double TruncatedLeastSquares::graduatedWeight(double distance,
 }
 
 double TruncatedLeastSquares::graduatedLoss(double distance, double mu) const {
-  return Weight(mu * c_, distance);
+  return Loss(mu * c_, (mu * mu) * csquared_, distance);
 }
 
 void TruncatedLeastSquares::print(const std::string &s="") const {
diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h
index b1890c5e42..3b93faf0a6 100644
--- a/gtsam/linear/LossFunctions.h
+++ b/gtsam/linear/LossFunctions.h
@@ -469,6 +469,20 @@ class GTSAM_EXPORT GemanMcClure : public Base {
   /// @brief Static implementation of GemanMcClure Loss
   static double Loss(double csquared, double distance);
 
+  /** @brief Static helper to compute shape param (c) using outlier influence.
+   * Computes a shape param such that an outlier (any measurement with
+   * residual equal to or greater than the chi2_outlier_threshold) will have an
+   * "influence" (derivative of loss) less than or equal to the
+   * influence_threshold: d/dx(\rho(x)) < influence_thresh
+   * @param influence_thresh - The max influence permitted by an outlier
+   * @param dof - The degrees of freedom of the corresponding measurement
+   * @param chi2_outlier_thresh - The threshold for outlier (i.e. 
0.95 = any + * measurement with residual greater than 95% of expected is an outlier) + * @returns The shape param + */ + static double shapeParamFromInfThresh(double influence_thresh, size_t dof, + double chi2_outlier_thresh); + protected: double c_; double csquared_; diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index f31f6ec323..7d9d5752e7 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -640,6 +640,23 @@ TEST(NoiseModel, robustFunctionHuber) DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8); } +TEST(NoiseModel, robustFunctionHuberGraduated) { + const double k = 5.0, e1 = 1.0, e2 = 10.0; + const mEstimator::Huber::shared_ptr huber = mEstimator::Huber::Create(k); + // Convex For large \mu + DOUBLES_EQUAL(1.0, huber->graduatedWeight(e1, 1e8), 1e-6); + DOUBLES_EQUAL(1.0, huber->graduatedWeight(e2, 1e8), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(huber->weight(e1), huber->graduatedWeight(e1, 1.0), 1e-6); + DOUBLES_EQUAL(huber->weight(e2), huber->graduatedWeight(e2, 1.0), 1e-6); + // Convex For large \mu + DOUBLES_EQUAL(0.5 * e1 * e1, huber->graduatedLoss(e1, 1e8), 1e-6); + DOUBLES_EQUAL(0.5 * e2 * e2, huber->graduatedLoss(e2, 1e8), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(huber->loss(e1), huber->graduatedLoss(e1, 1.0), 1e-6); + DOUBLES_EQUAL(huber->loss(e2), huber->graduatedLoss(e2, 1.0), 1e-6); +} + TEST(NoiseModel, robustFunctionCauchy) { const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0; @@ -656,6 +673,23 @@ TEST(NoiseModel, robustFunctionCauchy) DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8); } +TEST(NoiseModel, robustFunctionCauchyGraduated) { + const double k = 5.0, e1 = 1.0, e2 = 10.0; + const mEstimator::Cauchy::shared_ptr cauchy = mEstimator::Cauchy::Create(k); + // Convex For large \mu + DOUBLES_EQUAL(1.0, cauchy->graduatedWeight(e1, 1e8), 1e-6); + DOUBLES_EQUAL(1.0, cauchy->graduatedWeight(e2, 1e8), 1e-6); 
+ // Standard for \mu = 1 + DOUBLES_EQUAL(cauchy->weight(e1), cauchy->graduatedWeight(e1, 1.0), 1e-6); + DOUBLES_EQUAL(cauchy->weight(e2), cauchy->graduatedWeight(e2, 1.0), 1e-6); + // Convex For large \mu + DOUBLES_EQUAL(0.5 * e1 * e1, cauchy->graduatedLoss(e1, 1e8), 1e-6); + DOUBLES_EQUAL(0.5 * e2 * e2, cauchy->graduatedLoss(e2, 1e8), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(cauchy->loss(e1), cauchy->graduatedLoss(e1, 1.0), 1e-6); + DOUBLES_EQUAL(cauchy->loss(e2), cauchy->graduatedLoss(e2, 1.0), 1e-6); +} + TEST(NoiseModel, robustFunctionAsymmetricCauchy) { const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0; @@ -687,6 +721,67 @@ TEST(NoiseModel, robustFunctionGemanMcClure) DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8); } +TEST(NoiseModel, robustFunctionGemanMcClureGraduatedScaled) { + const double k = 1.0, e1 = 1.0, e2 = 10.0; + const mEstimator::GemanMcClure::shared_ptr gmc = + mEstimator::GemanMcClure::Create(k); + // Convex For large \mu + DOUBLES_EQUAL(1.0, gmc->graduatedWeight(e1, 1e12), 1e-6); + DOUBLES_EQUAL(1.0, gmc->graduatedWeight(e2, 1e12), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(gmc->weight(e1), gmc->graduatedWeight(e1, 1.0), 1e-6); + DOUBLES_EQUAL(gmc->weight(e2), gmc->graduatedWeight(e2, 1.0), 1e-6); + // Convex For large \mu + DOUBLES_EQUAL(0.5 * e1 * e1, gmc->graduatedLoss(e1, 1e12), 1e-6); + DOUBLES_EQUAL(0.5 * e2 * e2, gmc->graduatedLoss(e2, 1e12), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(gmc->loss(e1), gmc->graduatedLoss(e1, 1.0), 1e-6); + DOUBLES_EQUAL(gmc->loss(e2), gmc->graduatedLoss(e2, 1.0), 1e-6); +} + +/* ************************************************************************* */ +TEST(NoiseModel, robustFunctionGemanMcClureGraduatedScaleInvariant) { + mEstimator::GemanMcClure::shared_ptr gmc = mEstimator::GemanMcClure::Create( + 1.0, mEstimator::GemanMcClure::GradScheme::SCALE_INVARIANT); + // At zero error is not dependent on mu + DOUBLES_EQUAL(0.0, gmc->graduatedLoss(0.0, 0.0), 
1e-9); + CHECK(assert_equal(0.0, gmc->graduatedLoss(0.0, 0.5), 1e-9)); + CHECK(assert_equal(0.0, gmc->graduatedLoss(0.0, 1.0), 1e-9)); + + // For Mu = 0.0 error is quadratic + DOUBLES_EQUAL(0.0025, gmc->graduatedLoss(0.1, 0.0), 1e-9); + DOUBLES_EQUAL(0.4225, gmc->graduatedLoss(1.3, 0.0), 1e-9); + DOUBLES_EQUAL(38.750625, gmc->graduatedLoss(12.45, 0.0), 1e-9); + + // For 0.0 < Mu Error depends on shape + DOUBLES_EQUAL(0.00454545454, gmc->graduatedLoss(0.1, 0.5), 1e-9); + DOUBLES_EQUAL(0.36739130434, gmc->graduatedLoss(1.3, 0.5), 1e-9); + DOUBLES_EQUAL(5.76217472119, gmc->graduatedLoss(12.45, 0.5), 1e-9); + + // For Mu == 1.0 error depends is Geman-Mcclure + DOUBLES_EQUAL(0.00495049504, gmc->graduatedLoss(0.1, 1.0), 1e-9); + DOUBLES_EQUAL(0.31412639405, gmc->graduatedLoss(1.3, 1.0), 1e-9); + DOUBLES_EQUAL(0.49679492315, gmc->graduatedLoss(12.45, 1.0), 1e-9); + + // At Mu = 0 weights are identical at 0.5 + DOUBLES_EQUAL(0.5, gmc->graduatedWeight(0.1, 0.0), 1e-9); + DOUBLES_EQUAL(0.5, gmc->graduatedWeight(1.3, 0.0), 1e-9); + DOUBLES_EQUAL(0.5, gmc->graduatedWeight(12.45, 0.0), 1e-9); + + // At Mu = 1 weights are higher for low error + DOUBLES_EQUAL(0.9802960494, gmc->graduatedWeight(0.1, 1.0), 1e-9); + DOUBLES_EQUAL(0.13819598955, gmc->graduatedWeight(1.3, 1.0), 1e-9); + DOUBLES_EQUAL(0.00004109007, gmc->graduatedWeight(12.45, 1.0), 1e-9); + + // At Mu = 1 large residuals have ~0 weight + DOUBLES_EQUAL(0.0, gmc->graduatedWeight(2000.0, 1.0), 1e-9); + + // Across Mu residual of 0 will have weight = 1 + DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.1), 1e-9); + DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.5), 1e-9); + DOUBLES_EQUAL(1.0, gmc->graduatedWeight(0.0, 0.8), 1e-9); +} + TEST(NoiseModel, robustFunctionTLS) { const double k = 4.0, error1 = 0.5, error2 = 10.0, error3 = -10.0, error4 = -0.5; @@ -702,6 +797,24 @@ TEST(NoiseModel, robustFunctionTLS) DOUBLES_EQUAL(0.1250, tls->loss(error4), 1e-8); } +TEST(NoiseModel, robustFunctionTruncatedLeastSquaresGraduated) { 
+ const double k = 5.0, e1 = 1.0, e2 = 10.0; + const mEstimator::TruncatedLeastSquares::shared_ptr tls = + mEstimator::TruncatedLeastSquares::Create(k); + // Convex For large \mu + DOUBLES_EQUAL(1.0, tls->graduatedWeight(e1, 1e12), 1e-6); + DOUBLES_EQUAL(1.0, tls->graduatedWeight(e2, 1e12), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(tls->weight(e1), tls->graduatedWeight(e1, 1.0), 1e-6); + DOUBLES_EQUAL(tls->weight(e2), tls->graduatedWeight(e2, 1.0), 1e-6); + // Convex For large \mu + DOUBLES_EQUAL(0.5 * e1 * e1, tls->graduatedLoss(e1, 1e12), 1e-6); + DOUBLES_EQUAL(0.5 * e2 * e2, tls->graduatedLoss(e2, 1e12), 1e-6); + // Standard for \mu = 1 + DOUBLES_EQUAL(tls->loss(e1), tls->graduatedLoss(e1, 1.0), 1e-6); + DOUBLES_EQUAL(tls->loss(e2), tls->graduatedLoss(e2, 1.0), 1e-6); +} + TEST(NoiseModel, robustFunctionWelsch) { const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0; @@ -809,9 +922,11 @@ TEST(NoiseModel, robustNoiseGemanMcClure) const double a00 = 1.0, a01 = 10.0, a10 = 100.0, a11 = 1000.0; Matrix A = (Matrix(2, 2) << a00, a01, a10, a11).finished(); Vector b = Vector2(error1, error2); - const Robust::shared_ptr robust = Robust::Create( - mEstimator::GemanMcClure::Create(k, mEstimator::GemanMcClure::Scalar), - Unit::Create(2)); + const Robust::shared_ptr robust = + Robust::Create(mEstimator::GemanMcClure::Create( + k, mEstimator::GemanMcClure::GradScheme::STANDARD, + mEstimator::GemanMcClure::Scalar), + Unit::Create(2)); robust->WhitenSystem(A, b); @@ -908,6 +1023,7 @@ TEST(NoiseModel, robustNoiseCustomHuber) { const auto abs_e = std::abs(e); return abs_e <= k ? 
abs_e * abs_e / 2.0 : k * abs_e - k * k / 2.0; }, + std::nullopt, std::nullopt, noiseModel::mEstimator::Custom::Scalar, "Huber"), Unit::Create(2)); diff --git a/gtsam/sam/RISAMGraduatedKernel.cpp b/gtsam/sam/RISAMGraduatedKernel.cpp deleted file mode 100644 index 627f97ab1a..0000000000 --- a/gtsam/sam/RISAMGraduatedKernel.cpp +++ /dev/null @@ -1,80 +0,0 @@ - -#include - -#include - -namespace gtsam { - -/* ************************************************************************* */ -double SIGKernel::error(const double& residual, const double& mu) const { - double r2 = residual * residual; - double c2 = this->shape_param * this->shape_param; - return 0.5 * (c2 * r2) / (c2 + pow(r2, mu)); -} - -/* ************************************************************************* */ -double SIGKernel::weight(const double& residual, const double& mu) const { - double r2 = residual * residual; - double c2 = this->shape_param * this->shape_param; - double sqrt_denom = c2 + pow(r2, mu); - return (c2 * (c2 + pow(r2, mu) * (1 - mu))) / (sqrt_denom * sqrt_denom); -} - -/* ************************************************************************* */ -double SIGKernel::updateMu(const double& mu, const double& residual, - const size_t& update_count) const { - return this->mu_update_strat(mu, residual, update_count); -} - -/* ************************************************************************* */ -double SIGKernel::incrementMuInit(const double& mu) const { - return std::min(1.0, mu + this->mu_init_increment); -} - -/* ************************************************************************* */ -double SIGKernel::incrementMuInitInv(const double& mu) const { - return std::max(0.0, mu - this->mu_init_increment); -} - -/* ************************************************************************* */ -bool SIGKernel::isMuConverged(const double& mu) const { - return mu >= convergence_thresh_; -} - -/* ************************************************************************* */ -double 
SIGKernel::shapeParamFromInfThresh(double influence_thresh, size_t dof, - double chi2_outlier_thresh) { - double outlier_residual_thresh = - internal::chiSquaredQuantile(dof, chi2_outlier_thresh); - // Equation from taking derivative of SIGKernel setting equal to - // influence_thresh and solving for mu=1 - const double t1 = - std::sqrt(2 * influence_thresh * std::pow(outlier_residual_thresh, 5)); - const double t2 = influence_thresh * std::pow(outlier_residual_thresh, 2); - const double t3 = influence_thresh - 2 * outlier_residual_thresh; - return std::sqrt(-((t1 + t2) / t3)); -} - -/* ************************************************************************* */ -double SIGKernel::shapeParamFromChi2(size_t dof, double chi2_threshold) { - return internal::chiSquaredQuantile(dof, chi2_threshold); -} - -/* ************************************************************************* */ -double SIGKernel::muUpdateMcGann2023(const double& mu, const double& residual, - const size_t& update_count) { - return std::min(1.0, mu + (mu + 0.1) * 1.2); -} - -/* ************************************************************************* */ -double SIGKernel::muUpdateStable(const double& mu, const double& residual, - const size_t& update_count) { - // Conditional Arithmetic Series: mu[t+1] = mu[t] + 0.5 if (t==0), mu[t] + - // 0.4 if (t==1), else mu[t] + 0.05 - // Authors attempt to build the most stable method possible from years of - // using the algorithm. - if (update_count == 0) return std::min(1.0, mu + 0.5); - if (update_count == 1) return std::min(1.0, mu + 0.4); - return std::min(1.0, mu + 0.05); -} -} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sam/RISAMGraduatedKernel.h b/gtsam/sam/RISAMGraduatedKernel.h deleted file mode 100644 index d1dc6ffce2..0000000000 --- a/gtsam/sam/RISAMGraduatedKernel.h +++ /dev/null @@ -1,206 +0,0 @@ -/** @brief Interface for a graduated robust kernel. 
- * That is a robust kernel \rho(r_i, \mu) that varies from highly convex - * (quadratic) when \mu = \mu_{init} to non-convex (robust cost) as \mu trends - * towards \mu_{final} - * - * - * - * @author Dan McGann - * @date Mar 2022 - */ -#pragma once -#include -#include -#include - -#include -#include - -namespace gtsam { - -/** @brief Base class for graduated kernels for riSAM - * Advanced users can write their own kernels by inheriting from this class - */ -class GraduatedKernel { - /// @name Types - /// @{ - public: - typedef std::shared_ptr shared_ptr; - /// @} - - /// @name Fields - /// @{ - protected: - /// @brief The initial Value for mu - const double mu_init_; - /// @brief The threshold at which to consider mu to be converged - const double convergence_thresh_; - /// @} - - /// @name Public Interface - /// @{ - public: - GraduatedKernel(double mu_init, double convergence_thresh) - : mu_init_(mu_init), convergence_thresh_(convergence_thresh) {} - - /** @brief Computes the graduated robust error of the function - * Error = \rho(r_i) - * - * @param residual: r_i the current (whitened) residual - * @param mu: \mu the current convexity parameter - */ - virtual double error(const double& residual, const double& mu) const = 0; - - /** @brief Computes the weight of the graduated robust cost function - * weight = w(r_i) - * - * @param residual: r_i the current (whitened) residual - * @param mu: \mu the current convexity parameter - */ - virtual double weight(const double& residual, const double& mu) const = 0; - - /** @brief Weights a linearized system - * Weight results in the solving of: w_i * r_i^2 = \sqrt(w_i) (A_i x + b_i) = - * \sqrt(w_i)* A_i x + \sqrt(w_i) * b_i - * - * @param A: Whitened linear system from a factor (\Sigma^{-0.5} H_i) - * @param b: Whitened linear system error from a factor (0.5 * \Sigma^(-0.5) - * ||h_i(x_i) - z_i||^2 = 0.5 * r_i^2) - */ - void weightSystem(std::vector& A, gtsam::Vector& b, - const double& residual, const double& mu) 
const { - double sqrt_weight = sqrt(weight(residual, mu)); - for (gtsam::Matrix& Aj : A) { - Aj *= sqrt_weight; - } - b *= sqrt_weight; - } - - /// @brief Returns the value of \mu_{init} for this graduated kernel - double muInit() const { return mu_init_; } - - /// @brief Returns the next value of \mu for this graduated kernel - /// @param mu: The current value of mu - /// @param residual: The current residual of the factor - /// @param update_count: The number of mu updates during this graduated solve - /// 0 = just made convex - virtual double updateMu(const double& mu, const double& residual, - const size_t& update_count) const = 0; - - /// @brief Returns an incremented mu_init, used to decrease the influence of - /// known outliers - virtual double incrementMuInit(const double& mu_init) const = 0; - - /// @brief Returns the previous value of mu_init for this graduated kernel - virtual double incrementMuInitInv(const double& mu) const = 0; - - /// @brief Returns true iff the value of \mu has converged to a non-convex - /// state - virtual bool isMuConverged(const double& mu) const = 0; - /// @} -}; - -/* ************************************************************************* */ -/** @brief Implementation of the Scale Invariant Graduated Kernel - * Based on the Geman-McClure Kernel this kernel has the mathematical form: - * - * \rho(r) = 0.5 * \frac{c^2 * r^2}{c^2 + (r^2)^\mu} - * - * where c is the shape parameter and r is the residual of the factor - */ -class SIGKernel : public GraduatedKernel { - /// @name Types - /// @{ - public: - /// @brief Shortcut for shared pointer - typedef std::shared_ptr shared_ptr; - /// @brief Function type for mu update sequence - typedef std::function MuUpdateStrategy; - /// @} - - /// @name Fields - /// @{ - public: - /// @brief The shape parameter of the SIG kernel 'c' - double shape_param; - /// @brief The update strategy for the sequence mu_ values - MuUpdateStrategy mu_update_strat; - /// @brief The amount to 
increment/decrement mu init if the factor is a strong - /// inlier/outlier when values converge - double mu_init_increment; - /// @} - - - /// @name Public Interface - /// @{ - public: - /** @brief Individual Parameter Constructor - * @param shape_param: The shape parameter for the kernel - * @param mu_update_strat: The update strategy to use for mu updates - * @param mu_init_increment: The amount to increment/decrement mu init - */ - SIGKernel(double shape_param, - MuUpdateStrategy mu_update_strat = muUpdateStable, - double mu_init_increment = 0.2) - : GraduatedKernel(0.0, 1.0), - shape_param(shape_param), - mu_update_strat(mu_update_strat), - mu_init_increment(mu_init_increment) {} - - /// @brief @see GraduatedKernel - double error(const double& residual, const double& mu) const override; - /// @brief @see GraduatedKernel - double weight(const double& residual, const double& mu) const override; - /// @brief @see GraduatedKernel - double updateMu(const double& mu, const double& residual, - const size_t& update_count) const override; - /// @brief @see GraduatedKernel - double incrementMuInit(const double& mu) const override; - /// @brief @see GraduatedKernel - double incrementMuInitInv(const double& mu) const override; - /// @brief @see GraduatedKernel - bool isMuConverged(const double& mu) const override; - /// @} - - /// @name Static Helpers - /// @{ - public: - /** @brief Computes a shape param based on an an influence threshold for - * outliers Computes a shape param such that an outlier (any measurement with - * residual equal to or greater than the chi2_outlier_threshold) will have an - * "influence" (derivative of kernel at mu=1) less than or equal to the - * influence_threshold: d/dr(\rho(r^2)) < influence_thresh - * @param influence_thresh - The max influence permited by an outlier - * @param dof - The degrees of freedom of the corresponding measurement (use - * for chi2) - * @param chi2_outlier_thresh - The threshold for outlier (i.e. 
0.95 = any - * measurement with residual greater than 95% of expected measurements is an - * outlier) - * @returns The shape param such that outliers will have inf <= - * influence_thresh - */ - static double shapeParamFromInfThresh(double influence_thresh, size_t dof, - double chi2_outlier_thresh); - - /** @brief Computes a shape param based on dimensionality and a chi2_threshold - * Computes a shape param c = chi2.inv_cdf(dof, thresh) - * @param dof - The degrees of freedom of the corresponding measurement - * @param chi2_threshold - The the threshold at which to compute the shape - * param value - * @returns The shape param that scales based on dimensionality of the - * measurement - */ - static double shapeParamFromChi2(size_t dof, double chi2_threshold); - - /** Default Mu Update Strategies **/ - /// @brief Mu Update sequence empirically discovered and presented in the orig - /// riSAM paper - static double muUpdateMcGann2023(const double& mu, const double& residual, - const size_t& update_count); - /// @brief More stable Mu Update sequence developed empirically since - /// algorithm was published - static double muUpdateStable(const double& mu, const double& residual, - const size_t& update_count); - /// @} -}; -} // namespace gtsam \ No newline at end of file