File factor.h#
-
namespace sym
-
Functions
-
template<typename Scalar>
std::ostream &operator<<(std::ostream &os, const sym::Factor<Scalar> &factor)#
-
std::ostream &operator<<(std::ostream &os, const sym::linearized_sparse_factor_t &factor)#
-
std::ostream &operator<<(std::ostream &os, const sym::linearized_sparse_factorf_t &factor)#
-
template<typename ScalarType>
class Factor - #include <factor.h>
A residual term for optimization.
Created from a function and a set of Keys that act as inputs. Given a Values as an evaluation point, generates a linear approximation to the residual function. Contains templated helper constructors to simplify creation.
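For orientation, a minimal end-to-end sketch (not part of the header) of creating a factor and linearizing it is shown below. The sym::Values calls (Set) and the values.h include are assumed from the broader SymForce API; the keys and the residual itself are illustrative.

    #include <factor.h>
    #include <values.h>  // assumed companion header for sym::Values

    // Hypothetical residual r = x - y between two scalar keys.
    const auto factor = sym::Factor<double>::Jacobian(
        [](double x, double y, Eigen::Matrix<double, 1, 1>* res,
           Eigen::Matrix<double, 1, 2>* jac) {
          (*res)(0) = x - y;
          (*jac) << 1.0, -1.0;
        },
        /* keys_to_func = */ {sym::Key('x'), sym::Key('y')});

    sym::Values<double> values;
    values.Set(sym::Key('x'), 1.0);  // assumed Values::Set accessor
    values.Set(sym::Key('y'), 2.0);

    // Dense linearization at the current values (residual, jacobian, hessian, rhs).
    const auto linearized = factor.Linearize(values);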
Public Types
-
using Scalar = ScalarType
-
using LinearizedDenseFactor = typename LinearizedDenseFactorTypeHelper<Scalar>::Type
-
using LinearizedSparseFactor = typename LinearizedSparseFactorTypeHelper<Scalar>::Type
-
template<typename JacobianMatrixType>
using JacobianFunc = std::function<void(const Values<Scalar>&, const std::vector<index_entry_t>&, VectorX<Scalar>*, JacobianMatrixType*)>
-
using DenseJacobianFunc = JacobianFunc<MatrixX<Scalar>>
-
using SparseJacobianFunc = JacobianFunc<Eigen::SparseMatrix<Scalar>>
-
template<typename MatrixType>
using HessianFunc = std::function<void(const Values<Scalar>&, const std::vector<index_entry_t>&, VectorX<Scalar>*, MatrixType*, MatrixType*, VectorX<Scalar>*)>
-
using DenseHessianFunc = HessianFunc<MatrixX<Scalar>>
-
using SparseHessianFunc = HessianFunc<Eigen::SparseMatrix<Scalar>>
Public Functions
-
Factor() = default
-
Factor(DenseHessianFunc hessian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
Create directly from a (dense/sparse) hessian functor. This is the lowest-level constructor.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
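As an illustration of this lowest-level form, a hand-written dense hessian functor for a single scalar key might look like the following sketch. The values.At<double>(...) accessor and the sym::index_entry_t qualification are assumed from the broader SymForce API, and the index-entry argument is ignored for brevity.

    // Sketch: residual r = x - 5, so J = [1], H = J^T * J = [1], rhs = J^T * r.
    const sym::Key x('x');
    const sym::Factor<double> factor(
        sym::Factor<double>::DenseHessianFunc(
            [x](const sym::Values<double>& values,
                const std::vector<sym::index_entry_t>& /* index_entries */,
                sym::VectorX<double>* residual, sym::MatrixX<double>* jacobian,
                sym::MatrixX<double>* hessian, sym::VectorX<double>* rhs) {
              const double r = values.At<double>(x) - 5.0;  // assumed accessor
              residual->resize(1);
              (*residual)(0) = r;
              jacobian->resize(1, 1);
              (*jacobian)(0, 0) = 1.0;
              hessian->resize(1, 1);
              (*hessian)(0, 0) = 1.0;
              rhs->resize(1);
              (*rhs)(0) = r;
            }),
        /* keys_to_func = */ {x});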
-
Factor(SparseHessianFunc hessian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
-
Factor(const DenseJacobianFunc &jacobian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
Create from a function that computes the (dense/sparse) jacobian. The hessian will be computed using the Gauss-Newton approximation:
H = J.T * J
rhs = J.T * b
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
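The corresponding jacobian-functor form is sketched below under the same assumptions as the hessian-functor sketch above; the hessian and rhs are then formed internally from the Gauss-Newton approximation.

    // Sketch: jacobian-only functor; H = J^T * J and rhs = J^T * b are computed internally.
    const sym::Key x('x');
    const sym::Factor<double> factor(
        sym::Factor<double>::DenseJacobianFunc(
            [x](const sym::Values<double>& values,
                const std::vector<sym::index_entry_t>& /* index_entries */,
                sym::VectorX<double>* residual, sym::MatrixX<double>* jacobian) {
              residual->resize(1);
              (*residual)(0) = values.At<double>(x) - 5.0;  // assumed accessor
              jacobian->resize(1, 1);
              (*jacobian)(0, 0) = 1.0;
            }),
        {x});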
-
Factor(const SparseJacobianFunc &jacobian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output just the numerical values of the residual.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
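Continuing the earlier sketch, a caller that linearizes the same factor repeatedly might cache the index entries once, roughly as follows (the cache stays valid only while the layout of the Values is unchanged):

    // Compute the index entries once, then reuse them across repeated linearizations.
    const std::vector<sym::index_entry_t> index_entry_cache =
        values.CreateIndex(factor.AllKeys()).entries;

    sym::VectorX<double> residual;
    for (int i = 0; i < 100; ++i) {
      factor.Linearize(values, &residual, &index_entry_cache);
      // ... update the entries of `values` in place between iterations ...
    }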
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, MatrixX<Scalar> *jacobian, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output just the numerical values of the residual and jacobian.
This overload can only be called if IsSparse() is false; otherwise, it will throw.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, Eigen::SparseMatrix<Scalar> *jacobian, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output just the numerical values of the residual and jacobian.
This overload can only be called if IsSparse() is true; otherwise, it will throw.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
-
void Linearize(const Values<Scalar> &values, LinearizedDenseFactor &linearized_factor, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output a LinearizedDenseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse() is false; otherwise, it will throw.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
-
LinearizedDenseFactor Linearize(const Values<Scalar> &values, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output a LinearizedDenseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse() is false; otherwise, it will throw.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
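Continuing the earlier sketch, the returned linearization can be inspected directly; the field names used here are assumed to mirror those of linearized_sparse_factor_t documented below (residual, jacobian, hessian, rhs).

    const auto linearized = factor.Linearize(values);
    const double error = 0.5 * linearized.residual.squaredNorm();
    const auto& H = linearized.hessian;  // Gauss-Newton approximation J^T * J
    const auto& b = linearized.rhs;      // J^T * residual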
-
void Linearize(const Values<Scalar> &values, LinearizedSparseFactor &linearized_factor, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const
Evaluate the factor at the given linearization point and output a LinearizedSparseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse() is true; otherwise, it will throw.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as values.CreateIndex(factor.AllKeys()).entries.
-
inline bool IsSparse() const
Does this factor use a sparse jacobian/hessian matrix?
Public Static Functions
-
template<typename Functor>
static Factor Jacobian(Functor &&func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
Create from a function that computes the jacobian. The hessian will be computed using the Gauss-Newton approximation:
H = J.T * J
rhs = J.T * b
This version handles a variety of functors that take in individual input arguments rather than a Values object - the last two arguments to func should be outputs for the residual and jacobian; arguments before that should be inputs to func.
If generating this factor from a single Python function, you probably want to use the Factor::Hessian constructor instead (it’ll likely result in faster linearization). If you really want to generate a factor and use this constructor, func can be generated easily by creating a Codegen object from a Python function which returns the residual, then calling with_linearization with linearization_mode=STACKED_JACOBIAN.
See symforce_factor_test.cc for many examples, and the sketch below.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
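For instance, a sketch of a factor whose residual reads two keys but only optimizes the first (keys, dimensions, and the residual itself are illustrative):

    // Residual depends on x and on a fixed parameter c, but only x is optimized,
    // so the jacobian has a single column (the tangent dimension of x).
    const auto factor = sym::Factor<double>::Jacobian(
        [](double x, double c, Eigen::Matrix<double, 1, 1>* res,
           Eigen::Matrix<double, 1, 1>* jac) {
          (*res)(0) = x - c;
          (*jac)(0, 0) = 1.0;  // d(residual)/dx only
        },
        /* keys_to_func     = */ {sym::Key('x'), sym::Key('c')},
        /* keys_to_optimize = */ {sym::Key('x')});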
-
template<typename Functor>
static Factor Hessian(Functor &&func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})
Create from a functor that computes the full linearization, but takes in individual input arguments rather than a Values object. The last four arguments to func should be outputs for the residual, jacobian, hessian, and rhs; arguments before that should be inputs to func.
This should be used in cases where computing J^T J using a matrix multiplication is slower than evaluating J^T J symbolically; for instance, if the jacobian is sparse or J^T J has structure so that CSE is very effective.
func can be generated easily by creating a Codegen object from a Python function which returns the residual, then calling with_linearization with linearization_mode=FULL_LINEARIZATION (the default).
See symforce_factor_test.cc for many examples, and the sketch below.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
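As a sketch, the same scalar residual used in the earlier examples, written against this interface, supplies all four outputs directly (keys and dimensions are illustrative):

    // Full-linearization functor: outputs residual, jacobian, hessian, and rhs.
    const auto factor = sym::Factor<double>::Hessian(
        [](double x, Eigen::Matrix<double, 1, 1>* res, Eigen::Matrix<double, 1, 1>* jac,
           Eigen::Matrix<double, 1, 1>* hessian, Eigen::Matrix<double, 1, 1>* rhs) {
          const double r = x - 5.0;
          (*res)(0) = r;
          (*jac)(0, 0) = 1.0;
          (*hessian)(0, 0) = 1.0;  // J^T * J
          (*rhs)(0) = r;           // J^T * residual
        },
        {sym::Key('x')});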
-
struct linearized_sparse_factor_t#
Public Members
-
Eigen::VectorXd residual#
-
Eigen::SparseMatrix<double> jacobian#
-
Eigen::SparseMatrix<double> hessian#
-
Eigen::VectorXd rhs#
Public Static Functions
-
static inline constexpr const char *getTypeName()#
-
struct linearized_sparse_factorf_t#
Public Members
-
Eigen::VectorXf residual#
-
Eigen::SparseMatrix<float> jacobian#
-
Eigen::SparseMatrix<float> hessian#
-
Eigen::VectorXf rhs#
Public Static Functions
-
static inline constexpr const char *getTypeName()#
-
template<typename _S>
struct LinearizedDenseFactorTypeHelper#
-
template<typename _S>
struct LinearizedSparseFactorTypeHelper#
-
template<typename Scalar>