Namespace sym#
-
namespace sym
Typedefs
-
template<typename Scalar>
using SparseLinearization = Linearization<Eigen::SparseMatrix<Scalar>>#
-
using SparseLinearizationd = SparseLinearization<double>#
-
using SparseLinearizationf = SparseLinearization<float>#
-
template<typename Scalar>
using DenseLinearization = Linearization<MatrixX<Scalar>>#
-
using DenseLinearizationd = DenseLinearization<double>#
-
using DenseLinearizationf = DenseLinearization<float>#
-
template<typename Scalar>
using SparseOptimizationStats = OptimizationStats<Eigen::SparseMatrix<Scalar>>#
-
template<typename Scalar>
using DenseOptimizationStats = OptimizationStats<MatrixX<Scalar>>#
-
using OptimizationStatsd = SparseOptimizationStats<double>#
-
using OptimizationStatsf = SparseOptimizationStats<float>#
-
template<typename T>
using remove_cvref_t = typename remove_cvref<T>::type
-
using Valuesd = Values<double>
-
using Valuesf = Values<float>
-
using ImuWithGravityFactord = ImuWithGravityFactor<double>#
-
using ImuWithGravityFactorf = ImuWithGravityFactor<float>#
-
using ImuWithGravityDirectionFactord = ImuWithGravityDirectionFactor<double>#
-
using ImuWithGravityDirectionFactorf = ImuWithGravityDirectionFactor<float>#
-
using ImuPreintegratord = ImuPreintegrator<double>#
-
using ImuPreintegratorf = ImuPreintegrator<float>#
-
using PreintegratedImuMeasurementsd = PreintegratedImuMeasurements<double>#
-
using PreintegratedImuMeasurementsf = PreintegratedImuMeasurements<float>#
Functions
-
inline std::string FormatFailure(const char *error, const char *func, const char *file, int line)#
Format an assertion failure.
-
template<typename ...T>
inline std::string FormatFailure(const char *error, const char *func, const char *file, int line, const char *fmt, T&&... args)#
Format an assertion failure with a custom message, and optional additional things to format into the message.
-
template<typename Scalar>
void DumpGraph(const std::string &name, const std::vector<Key> &keys, const std::vector<Factor<Scalar>> &factors, std::ostream &out)#
Write the factor graph in .dot format to the given stream.
The factor graph is represented as keys and factors. For example, these can be obtained from an Optimizer as optimizer.Keys() and optimizer.Factors().
- Parameters:
name – [in] The name of the graph (e.g. optimizer.GetName())
keys – [in] The keys in the graph (e.g. optimizer.Keys())
factors – [in] The factors in the graph (e.g. optimizer.Factors())
out – [out] The stream to write the graph to
- template void DumpGraph(const std::string &name, const std::vector<Key> &keys, const std::vector<Factor<double>> &factors, std::ostream &out)
- template void DumpGraph(const std::string &name, const std::vector<Key> &keys, const std::vector<Factor<float>> &factors, std::ostream &out)
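For example, a sketch of dumping an optimizer's graph to a file (assuming an existing sym::Optimizerd named optimizer):
#include <fstream>

std::ofstream out("factor_graph.dot");
sym::DumpGraph(optimizer.GetName(), optimizer.Keys(), optimizer.Factors(), out);
// Render offline with graphviz, e.g.: dot -Tpng factor_graph.dot -o factor_graph.png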
-
template<typename Scalar>
std::ostream &operator<<(std::ostream &os, const sym::Factor<Scalar> &factor)#
-
std::ostream &operator<<(std::ostream &os, const sym::linearized_sparse_factor_t &factor)#
-
std::ostream &operator<<(std::ostream &os, const sym::linearized_sparse_factorf_t &factor)#
-
template<typename Scalar, typename Compare>
std::vector<Key> ComputeKeysToOptimize(const std::vector<Factor<Scalar>> &factors, Compare key_compare)#
Compute the combined set of keys to optimize from the given factors. Order using the given comparison function.
-
template<typename Scalar>
std::vector<Key> ComputeKeysToOptimize(const std::vector<Factor<Scalar>> &factors)#
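Usage sketch; sym::Key::LexicalCompare (documented below) implements the Compare spec:
// Default ordering:
const std::vector<sym::Key> keys = sym::ComputeKeysToOptimize(factors);
// Explicit lexical ordering:
const std::vector<sym::Key> lexical_keys =
    sym::ComputeKeysToOptimize(factors, sym::Key::LexicalCompare{});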
-
std::ostream &operator<<(std::ostream &os, const sym::Key &key)#
Print implementation for Key.
Examples:
Key('C', 13) -> "C_13"
Key('f') -> "f"
Key('f', 32, 2) -> "f_32_2"
Key('A', -2, 123) -> "A_n2_123"
Key() -> "NULLKEY"
-
template<typename Scalar>
sparse_matrix_structure_t GetSparseStructure(const Eigen::SparseMatrix<Scalar> &matrix)#
Returns the sparse matrix structure of matrix.
-
template<typename Derived>
sparse_matrix_structure_t GetSparseStructure(const Eigen::EigenBase<Derived> &matrix)#
Returns a default-initialized sparse structure, because the argument is dense.
-
template<typename MatrixType, typename Scalar, typename std::enable_if_t<kIsSparseEigenType<MatrixType>, bool> = true>
Eigen::Map<const Eigen::SparseMatrix<Scalar>> MatrixViewFromSparseStructure(const sparse_matrix_structure_t &structure, const MatrixX<Scalar> &values)#
Returns the matrix reconstructed from the stored values and sparse_matrix_structure_t.
Useful for reconstructing the jacobian for an iteration in the OptimizationStats.
The result contains references to structure and values, so it must not outlive them.
- Parameters:
structure – The sparsity pattern of the matrix, as obtained from GetSparseStructure or stored in OptimizationStats::iterations
values – The coefficients of the matrix; flat for sparse matrices, 2d for dense
- Template Parameters:
MatrixType – The type of the matrix, used to decide whether the result is sparse or dense; not otherwise used
Scalar – The scalar type of the result (must match the scalar type of values)
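A round-trip sketch; note the view aliases its arguments, so the flat coefficients are copied into a MatrixX first to keep the reference valid:
Eigen::SparseMatrix<double> A(2, 2);
A.insert(0, 0) = 1.0;
A.insert(1, 1) = 2.0;
A.makeCompressed();

const auto structure = sym::GetSparseStructure(A);
const Eigen::MatrixXd coefficients = sym::JacobianValues(A);  // flat nonzeros, copied
const auto view =
    sym::MatrixViewFromSparseStructure<Eigen::SparseMatrix<double>>(structure, coefficients);
// structure and coefficients must outlive view.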
-
template<typename MatrixType, typename Scalar, typename std::enable_if_t<kIsEigenType<MatrixType>, bool> = true>
Eigen::Map<const MatrixX<Scalar>> MatrixViewFromSparseStructure(const sparse_matrix_structure_t &structure, const MatrixX<Scalar> &values)#
Returns the matrix reconstructed from the stored values and sparse_matrix_structure_t. This is the dense overload.
Useful for reconstructing the jacobian for an iteration in the OptimizationStats.
The result contains references to structure and values, so it must not outlive them.
- Parameters:
structure – The sparsity pattern of the matrix; for dense matrices, this should be empty
values – The coefficients of the matrix; flat for sparse matrices, 2d for dense
- Template Parameters:
MatrixType – The type of the matrix, used to decide whether the result is sparse or dense; not otherwise used
Scalar – The scalar type of the result (must match the scalar type of values)
-
template<typename Scalar>
Eigen::Map<const VectorX<Scalar>> JacobianValues(const Eigen::SparseMatrix<Scalar> &matrix)#
Returns the coefficients of matrix. Overloads exist for both dense and sparse matrices to make writing generic code easier.
This version returns the non-zero values of an Eigen::SparseMatrix
Note: it returns a map, so be careful about mutating or disposing of matrix before you are finished with the output.
-
template<typename Scalar>
const MatrixX<Scalar> &JacobianValues(const MatrixX<Scalar> &matrix)#
Returns the coefficients of matrix. Overloads exist for both dense and sparse matrices to make writing generic code easier.
Returns a const-ref to the argument.
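These overloads enable generic code such as this sketch:
// Works for both MatrixX<double> and Eigen::SparseMatrix<double> jacobians.
template <typename MatrixType>
double JacobianSquaredNorm(const MatrixType& jacobian) {
  return sym::JacobianValues(jacobian).squaredNorm();
}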
-
template<typename Scalar>
SparseLinearization<Scalar> Linearize(const std::vector<Factor<Scalar>> &factors, const Values<Scalar> &values, const std::vector<Key> &keys_to_optimize = {}, const std::string &linearizer_name = "Linearizer")#
Free function as an alternate way to call the Linearizer.
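A sketch of a one-off linearization, assuming factors and values are already built:
const sym::SparseLinearizationd linearization = sym::Linearize<double>(factors, values);
const double initial_error = 0.5 * linearization.residual.squaredNorm();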
-
template<typename Scalar, typename NonlinearSolverType = LevenbergMarquardtSolver<Scalar>>
Optimizer<Scalar, NonlinearSolverType>::Stats Optimize(const optimizer_params_t &params, const std::vector<Factor<Scalar>> &factors, Values<Scalar> &values, const Scalar epsilon = kDefaultEpsilon<Scalar>)#
Simple wrapper to make it one function call.
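Usage sketch:
sym::optimizer_params_t params = sym::DefaultOptimizerParams();
const auto stats = sym::Optimize(params, factors, values);  // values is updated in place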
-
template<typename T>
T Interpolate(const T &a, const T &b, const typename StorageOps<T>::Scalar t, const typename StorageOps<T>::Scalar epsilon = kDefaultEpsilon<typename StorageOps<T>::Scalar>)
Interpolation between Lie group elements a and b. The result is a linear interpolation by coefficient t (in [0, 1]) in the tangent space around a.
This function version will not always be usable for passing into things that expect a functor callable with three arguments; for those applications, use sym::Interpolator<T>{}.
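Usage sketch (sym::Rot3d stands in for any Lie group type):
const sym::Rot3d a = sym::Rot3d::Identity();
const sym::Rot3d b = sym::Rot3d::Random();
const sym::Rot3d halfway = sym::Interpolate(a, b, 0.5);
// Functor form, for APIs that expect a callable:
const sym::Rot3d same = sym::Interpolator<sym::Rot3d>{}(a, b, 0.5);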
-
template<typename F, typename X>
auto NumericalDerivative(const F f, const X &x, const typename sym::StorageOps<X>::Scalar epsilon = kDefaultEpsilon<typename StorageOps<X>::Scalar>, const typename sym::StorageOps<X>::Scalar delta = 1e-2f)
Compute the numerical derivative of a function using a central difference approximation.
TODO(aaron): Add a higher-order approximation to the derivative either as an option or as the default
- Parameters:
f – The function to differentiate
x – Input at which to calculate the derivative
epsilon – Epsilon for Lie Group operations
delta – Derivative step size
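A sketch of numerically differentiating a group-valued function (assumes sym::Rot3d's rotation operator on vectors):
const Eigen::Vector3d v = Eigen::Vector3d::UnitZ();
const auto f = [&v](const sym::Rot3d& rot) -> Eigen::Vector3d { return rot * v; };
// 3x3: tangent dimension of the output by tangent dimension of Rot3d
const Eigen::Matrix3d numerical_jacobian = sym::NumericalDerivative(f, sym::Rot3d::Random());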
-
template<typename T>
std::enable_if_t<kIsEigenType<T>, bool> IsApprox(const T &a, const T &b, const typename StorageOps<T>::Scalar epsilon)
-
template<typename T>
std::enable_if_t<!kIsEigenType<T>, bool> IsApprox(const T &a, const T &b, const typename StorageOps<T>::Scalar epsilon)
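A sketch of generic approximate comparison across Eigen and geo types:
const bool vectors_close =
    sym::IsApprox(Eigen::Vector3d::Ones().eval(), Eigen::Vector3d::Ones().eval(), 1e-9);
const bool rotations_close =
    sym::IsApprox(sym::Rot3d::Identity(), sym::Rot3d::Identity(), 1e-9);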
-
template<typename Scalar>
std::ostream &operator<<(std::ostream &os, const Values<Scalar> &v)
Prints entries with their keys, data slices, and values, like:
<Valuesd entries=4 array=8 storage_dim=7 tangent_dim=6
  R_1 [0:4] --> <Rot3d [0.563679, 0.0939464, 0, 0.820634]>
  f_1 [5:6] --> 4.2
  f_2 [6:7] --> 4.3
  d_1 [7:8] --> 4.3
>
-
template<typename Scalar>
class DenseCholeskySolver#
- #include <dense_cholesky_solver.h>
A thin wrapper around Eigen::LDLT for use in nonlinear solver classes like sym::LevenbergMarquardtSolver.
Public Types
Public Functions
-
inline bool Factorize(const MatrixType &A)#
Decompose A into A = P^T * L * D * L^T * P and store internally.
- Pre:
A is a symmetric positive definite matrix.
- Returns:
true if factorization succeeded, and false if failed.
-
template<typename Rhs>
inline RhsType Solve(const Eigen::MatrixBase<Rhs> &b) const#
- Returns:
x for A x = b, where x and b are dense
- Pre:
this->Factorize has already been called and succeeded.
-
template<typename Rhs>
inline void SolveInPlace(Eigen::MatrixBase<Rhs> &b) const#
Solves in place for x in A x = b, where x and b are dense.
- Pre:
this->Factorize has already been called and succeeded.
-
inline auto L() const#
- Returns:
the lower triangular matrix L such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize and D is a diagonal matrix with positive diagonal entries, and P is a permutation matrix.
- Pre:
this->Factorize has already been called and succeeded.
-
inline auto D() const#
- Returns:
the diagonal matrix D such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize, L is lower triangular with unit diagonal, and P is a permutation matrix
- Pre:
this->Factorize has already been called and succeeded.
-
inline auto Permutation() const#
- Returns:
the permutation matrix P such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize, L is lower triangular with unit diagonal, and D is a diagonal matrix
- Pre:
this->Factorize has already been called and succeeded.
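A call-sequence sketch (sizes are illustrative; A is made symmetric positive definite by construction):
sym::DenseCholeskySolver<double> solver;
const Eigen::MatrixXd J = Eigen::MatrixXd::Random(6, 3);
const Eigen::MatrixXd A = J.transpose() * J + Eigen::MatrixXd::Identity(3, 3);
const Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
if (solver.Factorize(A)) {
  const Eigen::VectorXd x = solver.Solve(b);
}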
-
template<typename Scalar>
class DenseLinearizer#
Public Types
-
using LinearizationType = DenseLinearization<Scalar>#
Public Functions
-
DenseLinearizer(const std::string &name, const std::vector<Factor<Scalar>> &factors, const std::vector<Key> &key_order = {}, bool include_jacobians = false, bool debug_checks = false)#
Construct a Linearizer from factors and optional keys
- Parameters:
name – name of linearizer used for debug information
factors – Only stores a pointer, MUST be in scope for the lifetime of this object!
key_order – If provided, acts as an ordered set of keys that form the state vector to optimize. Can equal the set of all factor keys or a subset of all factor keys. If not provided, it is computed from all keys for all factors using a default ordering.
include_jacobians – Relinearize only allocates and fills out the jacobian if true.
debug_checks – Whether to perform additional sanity checks for NaNs. This uses additional compute but not additional memory except for logging.
-
bool IsInitialized() const#
Returns whether Relinearize() has already been called once.
Matters because many calculations need to be called only on the first linearization that are then cached for subsequent use. Also, if Relinearize() has already been called, then the matrices in the linearization are expected to already be allocated to the right size.
-
const std::vector<Key> &Keys() const#
The keys to optimize, in the order they appear in the state vector (i.e., in rhs).
-
const std::unordered_map<key_t, index_entry_t> &StateIndex() const#
A map from all optimized keys in the problem to an index entry for the corresponding optimized values (where the offset is into the problem state vector, i.e., the rhs of a linearization).
Only read if IsInitialized() returns true (i.e., after the first call to Relinearize).
-
void Relinearize(const Values<Scalar> &values, DenseLinearization<Scalar> &linearization)#
Update linearization at a new evaluation point. This is more efficient than reconstructing this object repeatedly. On the first call, it will allocate memory and perform analysis needed for efficient repeated relinearization.
On the first call to Relinearize, the matrices in linearization will be allocated and sized correctly. On subsequent calls, the matrices of linearization are expected to already be allocated and sized correctly.
TODO(aaron): This should be const except that it can initialize the object
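A relinearization-loop sketch, assuming factors and values are already built:
sym::DenseLinearizer<double> linearizer("my_linearizer", factors);
sym::DenseLinearizationd linearization;
linearizer.Relinearize(values, linearization);  // first call: allocates and analyzes
// ... update values between iterations ...
linearizer.Relinearize(values, linearization);  // later calls: reuse the allocated matrices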
-
template<typename Scalar, typename EigenSolver>
class EigenSparseSolver#
- #include <eigen_sparse_solver.h>
A thin wrapper around Eigen’s Sparse Solver interface for use in nonlinear solver classes like sym::LevenbergMarquardtSolver.
Can be specialized with anything satisfying the SparseSolver concept.
For example, can be used like:
using LinearSolver =
    sym::EigenSparseSolver<double, Eigen::CholmodDecomposition<Eigen::SparseMatrix<double>>>;
using NonlinearSolver = sym::LevenbergMarquardtSolver<double, LinearSolver>;
using Optimizer = sym::Optimizer<double, NonlinearSolver>;

Optimizer optimizer{...};
Public Types
Public Functions
-
inline bool Factorize(const MatrixType &A)#
Factorize A and store internally.
- Parameters:
A – a symmetric positive definite matrix.
- Returns:
true if factorization succeeded, and false if failed.
-
template<typename Rhs>
inline RhsType Solve(const Eigen::MatrixBase<Rhs> &b) const#
- Returns:
x for A x = b, where x and b are dense
- Pre:
this->Factorize has already been called and succeeded.
-
template<typename Rhs>
inline void SolveInPlace(Eigen::MatrixBase<Rhs> &b) const#
Solves in place for x in A x = b, where x and b are dense.
Eigen solvers cannot actually solve in place, so this solves, then copies back into the argument.
- Pre:
this->Factorize has already been called and succeeded.
-
inline MatrixType L() const#
- Returns:
the lower triangular matrix L such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize and D is a diagonal matrix with positive diagonal entries, and P is a permutation matrix.
- Pre:
this->Factorize has already been called and succeeded.
-
inline MatrixType D() const#
- Returns:
the diagonal matrix D such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize, L is lower triangular with unit diagonal, and P is a permutation matrix
- Pre:
this->Factorize has already been called and succeeded.
-
inline Eigen::PermutationMatrix<Eigen::Dynamic> Permutation() const#
- Returns:
the permutation matrix P such that P^T * L * D * L^T * P = A, where A is the last matrix to have been factorized with this->Factorize, L is lower triangular with unit diagonal, and D is a diagonal matrix
- Pre:
this->Factorize has already been called and succeeded.
-
inline void AnalyzeSparsityPattern(const MatrixType &A)#
Defined to satisfy the interface. No analysis is needed, so this is a no-op.
-
template<typename ScalarType>
class Factor#
- #include <factor.h>
A residual term for optimization.
Created from a function and a set of Keys that act as inputs. Given a Values as an evaluation point, generates a linear approximation to the residual function. Contains templated helper constructors to simplify creation.
Public Types
-
using Scalar = ScalarType#
-
using LinearizedDenseFactor = typename LinearizedDenseFactorTypeHelper<Scalar>::Type#
-
using LinearizedSparseFactor = typename LinearizedSparseFactorTypeHelper<Scalar>::Type#
-
template<typename JacobianMatrixType>
using JacobianFunc = std::function<void(const Values<Scalar>&, const std::vector<index_entry_t>&, VectorX<Scalar>*, JacobianMatrixType*)>#
-
using DenseJacobianFunc = JacobianFunc<MatrixX<Scalar>>#
-
using SparseJacobianFunc = JacobianFunc<Eigen::SparseMatrix<Scalar>>#
-
template<typename MatrixType>
using HessianFunc = std::function<void(const Values<Scalar>&, const std::vector<index_entry_t>&, VectorX<Scalar>*, MatrixType*, MatrixType*, VectorX<Scalar>*)>#
-
using DenseHessianFunc = HessianFunc<MatrixX<Scalar>>#
-
using SparseHessianFunc = HessianFunc<Eigen::SparseMatrix<Scalar>>#
Public Functions
-
Factor() = default#
-
Factor(DenseHessianFunc hessian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
Create directly from a (dense/sparse) hessian functor. This is the lowest-level constructor.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
-
Factor(SparseHessianFunc hessian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
-
Factor(const DenseJacobianFunc &jacobian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
Create from a function that computes the (dense/sparse) jacobian. The hessian will be computed using the Gauss-Newton approximation:
H = J.T * J
rhs = J.T * b
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
-
Factor(const SparseJacobianFunc &jacobian_func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output just the numerical values of the residual.
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, MatrixX<Scalar> *jacobian, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output just the numerical values of the residual and jacobian.
This overload can only be called if IsSparse() is false; otherwise, it will throw
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
void Linearize(const Values<Scalar> &values, VectorX<Scalar> *residual, Eigen::SparseMatrix<Scalar> *jacobian, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output just the numerical values of the residual and jacobian.
This overload can only be called if IsSparse() is true; otherwise, it will throw
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
void Linearize(const Values<Scalar> &values, LinearizedDenseFactor &linearized_factor, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output a LinearizedDenseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse() is false; otherwise, it will throw
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
LinearizedDenseFactor Linearize(const Values<Scalar> &values, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output a LinearizedDenseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse() is false; otherwise, it will throw
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
void Linearize(const Values<Scalar> &values, LinearizedSparseFactor &linearized_factor, const std::vector<index_entry_t> *maybe_index_entry_cache = nullptr) const#
Evaluate the factor at the given linearization point and output a LinearizedDenseFactor that contains the numerical values of the residual, jacobian, hessian, and right-hand-side.
This overload can only be called if IsSparse is true; otherwise, it will throw
- Parameters:
maybe_index_entry_cache – Optional. If provided, should be the index entries for each of the inputs to the factor in the given Values. For repeated linearization, caching this prevents repeated hash lookups. Can be computed as
values.CreateIndex(factor.AllKeys()).entries
.
-
inline bool IsSparse() const#
Does this factor use a sparse jacobian/hessian matrix?
Public Static Functions
-
template<typename Functor>
static Factor Jacobian(Functor &&func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
Create from a function that computes the jacobian. The hessian will be computed using the Gauss-Newton approximation:
H = J.T * J
rhs = J.T * b
This version handles a variety of functors that take in individual input arguments rather than a Values object - the last two arguments to func should be outputs for the residual and jacobian; arguments before that should be inputs to func.
If generating this factor from a single Python function, you probably want to use the Factor::Hessian constructor instead (it’ll likely result in faster linearization). If you really want to generate a factor and use this constructor, func can be generated easily by creating a Codegen object from a Python function which returns the residual, then calling with_linearization with linearization_mode=STACKED_JACOBIAN.
See symforce_factor_test.cc for many examples.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
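A C++ usage sketch with a trivial prior residual (names are illustrative):
const Eigen::Vector3d target{1.0, 2.0, 3.0};
sym::Factord factor = sym::Factord::Jacobian(
    [target](const Eigen::Vector3d& x, Eigen::Vector3d* const res,
             Eigen::Matrix3d* const jac) {
      *res = x - target;                   // residual
      *jac = Eigen::Matrix3d::Identity();  // d(res) / d(x)
    },
    /* keys_to_func = */ {'x'});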
-
template<typename Functor>
static Factor Hessian(Functor &&func, const std::vector<Key> &keys_to_func, const std::vector<Key> &keys_to_optimize = {})#
Create from a functor that computes the full linearization, but takes in individual input arguments rather than a Values object. The last four arguments to func should be outputs for the residual, jacobian, hessian, and rhs; arguments before that should be inputs to func.
This should be used in cases where computing J^T J using a matrix multiplication is slower than evaluating J^T J symbolically; for instance, if the jacobian is sparse or J^T J has structure so that CSE is very effective.
func can be generated easily by creating a Codegen object from a Python function which returns the residual, then calling with_linearization with linearization_mode=FULL_LINEARIZATION (the default).
See symforce_factor_test.cc for many examples.
- Parameters:
keys_to_func – The set of input arguments, in order, accepted by func.
keys_to_optimize – The set of input arguments that correspond to the derivative in func. Must be a subset of keys_to_func. If empty, then all keys_to_func are optimized.
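The same trivial residual written as a full-linearization sketch, with the Gauss-Newton terms computed inline:
const Eigen::Vector3d target{1.0, 2.0, 3.0};
sym::Factord factor = sym::Factord::Hessian(
    [target](const Eigen::Vector3d& x, Eigen::Vector3d* const res, Eigen::Matrix3d* const jac,
             Eigen::Matrix3d* const hessian, Eigen::Vector3d* const rhs) {
      *res = x - target;
      *jac = Eigen::Matrix3d::Identity();
      *hessian = jac->transpose() * (*jac);  // J^T * J
      *rhs = jac->transpose() * (*res);      // J^T * b
    },
    {'x'});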
-
template<typename T>
struct function_traits
Subclassed by sym::function_traits<const T>, sym::function_traits<T&>, sym::function_traits<T&&>, sym::function_traits<T*>, sym::function_traits<volatile T>
-
template<typename T>
struct function_traits<const T> : public sym::function_traits<T>
-
template<typename ReturnType, typename ...Args>
struct function_traits<ReturnType(Args...)>
Subclassed by sym::function_traits<ReturnType(ClassType::*)(Args...) const>, sym::function_traits<ReturnType(ClassType::*)(Args...)>
Public Types
-
using return_type = ReturnType
-
using base_return_type = typename std::decay_t<return_type>
-
template<std::size_t N>
struct arg
-
using return_type = ReturnType
-
template<typename ClassType, typename ReturnType, typename ...Args>
struct function_traits<ReturnType(ClassType::*)(Args...) const> : public sym::function_traits<ReturnType(Args...)>
-
template<typename ClassType, typename ReturnType, typename ...Args>
struct function_traits<ReturnType(ClassType::*)(Args...)> : public sym::function_traits<ReturnType(Args...)>
-
template<typename ReturnTypeT, typename ...Args, typename ...FArgs>
struct function_traits<SYM_BIND_TYPE>
Public Types
-
using return_type = ReturnTypeT
-
using base_return_type = typename std::decay_t<return_type>
-
template<std::size_t N>
struct arg
-
using return_type = ReturnTypeT
-
template<typename T>
struct function_traits<T&> : public sym::function_traits<T>
-
template<typename T>
struct function_traits<T&&> : public sym::function_traits<T>
-
template<typename T>
struct function_traits<T*> : public sym::function_traits<T>
-
template<typename T>
struct function_traits<volatile T> : public sym::function_traits<T>
-
template<typename BaseOptimizerType>
class GncOptimizer : public BaseOptimizerType#
- #include <gnc_optimizer.h>
Subclass of Optimizer for using Graduated Non-Convexity (GNC)
Assumes the convexity of the cost function is controlled by a hyperparameter mu. When mu == 0 the cost function should be convex, and as mu goes to 1 the cost function should smoothly transition to a robust cost.
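A rough construction sketch; that the trailing arguments mirror the base Optimizer constructor (factors first) is an assumption here:
sym::GncOptimizer<sym::Optimizerd> gnc_optimizer(optimizer_params, gnc_params,
                                                 /* gnc_mu_key = */ sym::Key('u'), factors);
sym::Optimizerd::Stats stats;
gnc_optimizer.Optimize(values, /* num_iterations = */ -1,
                       /* populate_best_linearization = */ false, stats);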
Public Types
-
using BaseOptimizer = BaseOptimizerType#
-
using Scalar = typename BaseOptimizer::Scalar#
Public Functions
-
template<typename ...OptimizerArgs>
inline GncOptimizer(const optimizer_params_t &optimizer_params, const optimizer_gnc_params_t &gnc_params, const Key &gnc_mu_key, OptimizerArgs&&... args)#
Constructor that copies in factors and keys
-
virtual ~GncOptimizer() = default#
-
inline void Optimize(Values<Scalar> &values, int num_iterations, bool populate_best_linearization, typename BaseOptimizer::Stats &stats) override#
-
template<typename Scalar>
class ImuFactor#
- #include <imu_factor.h>
A factor for using on-manifold IMU preintegration in a SymForce optimization problem.
By on-manifold, it is meant that the angular velocity measurements are composed as rotations rather than tangent-space approximations.
Assumes IMU bias is constant over the preintegration window. Does not recompute the preintegrated measurements when the IMU bias estimate changes during optimization, but rather uses a first order approximation linearized at the IMU biases given during preintegration.
The gravity argument should be [0, 0, -g] (where g is 9.8, assuming your world frame is gravity-aligned so that the -z direction points towards the Earth) unless your IMU reads 0 acceleration while stationary, in which case it should be [0, 0, 0].
More generally, the gravity argument should yield the true acceleration when added to the accelerometer measurement (after the measurement has been converted to the world frame).
Is a functor so as to store the preintegrated IMU measurements between two times. The residual computed is the local-coordinate difference between the final state (pose and velocity) and the state you would expect given the initial state and the IMU measurements.
Based heavily on the on-manifold ImuFactor found in GTSAM and on the paper:
Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, “IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation”, Robotics: Science and Systems (RSS), 2015.
Example Usage:
enum Var : char {
  POSE = 'p',        // Pose3d
  VELOCITY = 'v',    // Vector3d
  ACCEL_BIAS = 'A',  // Vector3d
  GYRO_BIAS = 'G',   // Vector3d
  GRAVITY = 'g',     // Vector3d
  EPSILON = 'e'      // Scalar
};

struct ImuMeasurement {
  Eigen::Vector3d acceleration;
  Eigen::Vector3d angular_velocity;
  double duration;
};

int main() {
  // Dummy function declarations
  std::vector<ImuMeasurement> GetMeasurementsBetween(double start_time, double end_time);
  std::vector<double> GetKeyFrameTimes();
  Eigen::Vector3d GetAccelBiasEstimate(double time);
  Eigen::Vector3d GetGyroBiasEstimate(double time);
  void AppendOtherFactors(std::vector<sym::Factord>& factors);

  // Example accelerometer and gyroscope covariances
  const Eigen::Vector3d accel_cov = Eigen::Vector3d::Constant(1e-5);
  const Eigen::Vector3d gyro_cov = Eigen::Vector3d::Constant(1e-5);

  std::vector<sym::Factord> factors;

  // GetKeyFrameTimes assumed to return at least 2 times
  std::vector<double> key_frame_times = GetKeyFrameTimes();

  // Integrating Imu measurements between keyframes, creating an ImuFactor for each
  for (int i = 0; i < static_cast<int>(key_frame_times.size()) - 1; i++) {
    const double start_time = key_frame_times[i];
    const double end_time = key_frame_times[i + 1];

    const std::vector<ImuMeasurement> measurements = GetMeasurementsBetween(start_time, end_time);

    // ImuPreintegrator should be initialized with the best guesses for the IMU biases
    sym::ImuPreintegrator<double> integrator(GetAccelBiasEstimate(start_time),
                                             GetGyroBiasEstimate(start_time));
    for (const ImuMeasurement& meas : measurements) {
      integrator.IntegrateMeasurement(meas.acceleration, meas.angular_velocity, accel_cov,
                                      gyro_cov, meas.duration);
    }

    factors.push_back(sym::ImuFactor<double>(integrator)
                          .Factor({{Var::POSE, i},
                                   {Var::VELOCITY, i},
                                   {Var::POSE, i + 1},
                                   {Var::VELOCITY, i + 1},
                                   {Var::ACCEL_BIAS, i},
                                   {Var::GYRO_BIAS, i},
                                   {Var::GRAVITY},
                                   {Var::EPSILON}}));
  }

  // Adding any other factors there might be for the problem
  AppendOtherFactors(factors);

  sym::Optimizerd optimizer(sym::DefaultOptimizerParams(), factors);

  // Build Values
  sym::Valuesd values;
  for (int i = 0; i < key_frame_times.size(); i++) {
    values.Set({Var::POSE, i}, sym::Pose3d());
    values.Set({Var::VELOCITY, i}, Eigen::Vector3d::Zero());
  }
  for (int i = 0; i < key_frame_times.size() - 1; i++) {
    values.Set({Var::ACCEL_BIAS, i}, GetAccelBiasEstimate(key_frame_times[i]));
    values.Set({Var::GYRO_BIAS, i}, GetGyroBiasEstimate(key_frame_times[i]));
  }
  // gravity should point towards the direction of acceleration
  values.Set({Var::GRAVITY}, Eigen::Vector3d(0.0, 0.0, -9.8));
  values.Set({Var::EPSILON}, sym::kDefaultEpsilond);
  // Initialize any other items in values
  ...

  optimizer.Optimize(values);
}
Public Types
-
using Preintegrator = sym::ImuPreintegrator<Scalar>#
-
using Measurement = sym::PreintegratedImuMeasurements<Scalar>#
Public Functions
-
explicit ImuFactor(const Preintegrator &preintegrator)#
Construct an ImuFactor from a preintegrator.
-
ImuFactor(const Measurement &measurement, const SqrtInformation &sqrt_information)#
Construct an ImuFactor from a (preintegrated) measurement and its corresponding sqrt information.
-
sym::Factor<Scalar> Factor(const std::vector<Key> &keys_to_func) const#
Construct a Factor object that can be passed to an Optimizer object given the keys to be passed to the function.
Convenience method to avoid manually specifying which arguments are optimized.
-
void operator()(const Pose3 &pose_i, const Vector3 &vel_i, const Pose3 &pose_j, const Vector3 &vel_j, const Vector3 &accel_bias_i, const Vector3 &gyro_bias_i, const Vector3 &gravity, Scalar epsilon, Eigen::Matrix<Scalar, 9, 1> *residual = nullptr, Eigen::Matrix<Scalar, 9, 24> *jacobian = nullptr, Eigen::Matrix<Scalar, 24, 24> *hessian = nullptr, Eigen::Matrix<Scalar, 24, 1> *rhs = nullptr) const#
Calculate a between factor on the product manifold of the pose and velocity where the prior is calculated from the preintegrated IMU measurements.
Time step i is the time of the first IMU measurement of the interval. Time step j is the time after the last IMU measurement of the interval.
- Parameters:
pose_i – Pose at time step i (world_T_body)
vel_i – Velocity at time step i (world frame)
pose_j – Pose at time step j (world_T_body)
vel_j – Velocity at time step j (world frame)
accel_bias_i – Bias of the accelerometer measurements between timesteps i and j
gyro_bias_i – Bias of the gyroscope measurements between timesteps i and j
gravity – Acceleration due to gravity (in the same frame as pose_x and vel_x), i.e., the vector which when added to the accelerometer measurements gives the true acceleration (up to bias and noise) of the IMU.
epsilon – epsilon used for numerical stability
residual – [out] The 9dof whitened local coordinate difference between predicted and estimated state
jacobian – [out] (9x24) jacobian of res wrt args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3)
hessian – [out] (24x24) Gauss-Newton hessian for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3)
rhs – [out] (24x1) Gauss-Newton rhs for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3)
-
template<typename Scalar>
class ImuPreintegrator#
- #include <imu_preintegrator.h>
Class to preintegrate IMU measurements on-manifold, for usage in a SymForce optimization problem.
For usage, see the doc-string of ImuFactor in imu_factor.h
Public Types
-
using Vector3 = typename PreintegratedImuMeasurements<Scalar>::Vector3#
-
using Matrix33 = typename PreintegratedImuMeasurements<Scalar>::Matrix33#
Public Functions
-
ImuPreintegrator(const Vector3 &accel_bias, const Vector3 &gyro_bias)#
Initialize with given accel_bias and gyro_bias
-
void IntegrateMeasurement(const Vector3 &measured_accel, const Vector3 &measured_gyro, const Vector3 &accel_cov, const Vector3 &gyro_cov, Scalar dt, Scalar epsilon = kDefaultEpsilon<Scalar>)#
Integrate a new measurement.
Postcondition:
If the orientation, velocity, and position were R0, v0, and p0 at the start of the first integrated IMU measurements, and Rf, vf, and pf are the corresponding values at the end of the measurement described by the arguments, DT is the total time covered by the integrated measurements, and g is the vector of acceleration due to gravity, then
pim.DR -> R0.inverse() * Rf
pim.Dv -> R0.inverse() * (vf - v0 - g * DT)
pim.Dp -> R0.inverse() * (pf - p0 - v0 * DT - 0.5 * g * DT^2)
pim.DX_D_accel_bias -> the derivative of DX wrt the accel bias linearized at pim.accel_bias
pim.DX_D_gyro_bias -> the derivative of DX wrt the gyro bias linearized at pim.gyro_bias
pim.accel_bias -> unchanged
pim.gyro_bias -> unchanged
pim.integrated_dt -> DT
covariance -> the covariance of [DR, Dv, Dp] in local coordinates of the mean
- Parameters:
measured_accel – the next accelerometer measurement after the last integrated measurement
measured_gyro – the next gyroscope measurement after the last integrated measurement
accel_cov – the covariance of the accelerometer measurement (represented by its diagonal entries)
[(meters / time^2)^2 * time]
gyro_cov – the covariance of the gyroscope measurement (represented by its diagonal entries)
[(radians / time)^2 * time]
dt – the time span over which these measurements were made
-
const PreintegratedImuMeasurements<Scalar> &PreintegratedMeasurements() const#
-
template<typename Scalar>
class ImuWithGravityDirectionFactor#
- #include <imu_factor.h>
A factor for using on-manifold IMU preintegration in a SymForce optimization problem, with the ability to optimize the gravity vector direction.
For full documentation, see ImuFactor.
Public Types
-
using Preintegrator = sym::ImuPreintegrator<Scalar>#
-
using Measurement = sym::PreintegratedImuMeasurements<Scalar>#
Public Functions
-
explicit ImuWithGravityDirectionFactor(const Preintegrator &preintegrator)#
Construct an ImuFactor from a preintegrator.
-
ImuWithGravityDirectionFactor(const Measurement &measurement, const SqrtInformation &sqrt_information)#
Construct an ImuFactor from a (preintegrated) measurement and its corresponding sqrt information.
-
sym::Factor<Scalar> Factor(const std::vector<Key> &keys_to_func) const#
Construct a Factor object that can be passed to an Optimizer object given the keys to be passed to the function.
Convenience method to avoid manually specifying which arguments are optimized.
-
void operator()(const Pose3 &pose_i, const Vector3 &vel_i, const Pose3 &pose_j, const Vector3 &vel_j, const Vector3 &accel_bias_i, const Vector3 &gyro_bias_i, const Unit3<Scalar> &gravity_direction, Scalar gravity_norm, Scalar epsilon, Eigen::Matrix<Scalar, 9, 1> *residual = nullptr, Eigen::Matrix<Scalar, 9, 26> *jacobian = nullptr, Eigen::Matrix<Scalar, 26, 26> *hessian = nullptr, Eigen::Matrix<Scalar, 26, 1> *rhs = nullptr) const#
Calculate a between factor on the product manifold of the pose and velocity where the prior is calculated from the preintegrated IMU measurements.
Time step i is the time of the first IMU measurement of the interval. Time step j is the time after the last IMU measurement of the interval.
- Parameters:
pose_i – Pose at time step i (world_T_body)
vel_i – Velocity at time step i (world frame)
pose_j – Pose at time step j (world_T_body)
vel_j – Velocity at time step j (world frame)
accel_bias_i – Bias of the accelerometer measurements between timesteps i and j
gyro_bias_i – Bias of the gyroscope measurements between timesteps i and j
gravity_direction – When multiplied by gravity_norm, the acceleration due to gravity (in the same frame as pose_x and vel_x), i.e., the vector which when added to the accelerometer measurements gives the true acceleration (up to bias and noise) of the IMU.
gravity_norm – The norm of the gravity vector
epsilon – epsilon used for numerical stability
residual – [out] The 9dof whitened local coordinate difference between predicted and estimated state
jacobian – [out] (9x26) jacobian of res wrt args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity_direction (2)
hessian – [out] (26x26) Gauss-Newton hessian for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity_direction (2)
rhs – [out] (26x1) Gauss-Newton rhs for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity_direction (2)
-
template<typename Scalar>
class ImuWithGravityFactor#
- #include <imu_factor.h>
A factor for using on-manifold IMU preintegration in a SymForce optimization problem, with the ability to optimize the gravity vector.
For full documentation, see ImuFactor.
Public Types
-
using Preintegrator = sym::ImuPreintegrator<Scalar>#
-
using Measurement = sym::PreintegratedImuMeasurements<Scalar>#
Public Functions
-
explicit ImuWithGravityFactor(const Preintegrator &preintegrator)#
Construct an ImuFactor from a preintegrator.
-
ImuWithGravityFactor(const Measurement &measurement, const SqrtInformation &sqrt_information)#
Construct an ImuFactor from a (preintegrated) measurement and its corresponding sqrt information.
-
sym::Factor<Scalar> Factor(const std::vector<Key> &keys_to_func) const#
Construct a Factor object that can be passed to an Optimizer object given the keys to be passed to the function.
Convenience method to avoid manually specifying which arguments are optimized.
-
void operator()(const Pose3 &pose_i, const Vector3 &vel_i, const Pose3 &pose_j, const Vector3 &vel_j, const Vector3 &accel_bias_i, const Vector3 &gyro_bias_i, const Vector3 &gravity, Scalar epsilon, Eigen::Matrix<Scalar, 9, 1> *residual = nullptr, Eigen::Matrix<Scalar, 9, 27> *jacobian = nullptr, Eigen::Matrix<Scalar, 27, 27> *hessian = nullptr, Eigen::Matrix<Scalar, 27, 1> *rhs = nullptr) const#
Calculate a between factor on the product manifold of the pose and velocity where the prior is calculated from the preintegrated IMU measurements.
Time step i is the time of the first IMU measurement of the interval. Time step j is the time after the last IMU measurement of the interval.
- Parameters:
pose_i – Pose at time step i (world_T_body)
vel_i – Velocity at time step i (world frame)
pose_j – Pose at time step j (world_T_body)
vel_j – Velocity at time step j (world frame)
accel_bias_i – Bias of the accelerometer measurements between timesteps i and j
gyro_bias_i – Bias of the gyroscope measurements between timesteps i and j
gravity – Acceleration due to gravity (in the same frame as pose_x and vel_x), i.e., the vector which when added to the accelerometer measurements gives the true acceleration (up to bias and noise) of the IMU.
epsilon – epsilon used for numerical stability
residual – [out] The 9dof whitened local coordinate difference between predicted and estimated state
jacobian – [out] (9x27) jacobian of res wrt args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity (3)
hessian – [out] (27x27) Gauss-Newton hessian for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity (3)
rhs – [out] (27x1) Gauss-Newton rhs for args pose_i (6), vel_i (3), pose_j (6), vel_j (3), accel_bias_i (3), gyro_bias_i (3), gravity (3)
-
template<typename T>
class Interpolator
- #include <util.h>
Interpolation between Lie group elements a and b. The result is a linear interpolation by coefficient t (in [0, 1]) in the tangent space around a.
Public Types
-
using Scalar = typename sym::StorageOps<T>::Scalar
-
class Key#
- #include <key.h>
Contains a letter plus an integral subscript and superscript. Can construct with a letter, a letter + sub, or a letter + sub + super, but not letter + super.
TODO(hayk): Consider an abstraction where Key contains a type enum.
Public Types
-
using letter_t = char#
Public Functions
-
constexpr Key() = default#
-
inline constexpr Key(const letter_t letter, const subscript_t sub = kInvalidSub, const superscript_t super = kInvalidSuper)#
-
inline constexpr Key(const key_t &key)#
-
inline constexpr subscript_t Sub() const noexcept#
-
inline constexpr superscript_t Super() const noexcept#
-
inline constexpr Key WithSub(const subscript_t sub) const#
-
inline constexpr Key WithSuper(const superscript_t super) const#
-
inline key_t GetLcmType() const noexcept#
Public Static Functions
Public Static Attributes
-
static constexpr subscript_t kInvalidSub = std::numeric_limits<subscript_t>::min()#
-
static constexpr superscript_t kInvalidSuper = std::numeric_limits<superscript_t>::min()#
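A few construction patterns as a sketch:
constexpr sym::Key pose0('P', 0);             // letter + subscript
constexpr sym::Key pose1 = pose0.WithSub(1);  // same letter, new subscript
const sym::Key from_lcm(pose0.GetLcmType());  // round-trip through the LCM type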
-
struct LexicalCompare#
- #include <key.h>
Implementation of the Compare spec for use in containers
-
template<typename ScalarType, typename LinearSolverType = sym::SparseCholeskySolver<Eigen::SparseMatrix<ScalarType>>>
class LevenbergMarquardtSolver#
- #include <levenberg_marquardt_solver.h>
Fast Levenberg-Marquardt solver for nonlinear least squares problems specified by a linearization function. Supports on-manifold optimization and sparse solving, and attempts to minimize allocations after the first iteration.
This assumes the problem structure is the same for the lifetime of the object - if the problem structure changes, create a new LevenbergMarquardtSolver.
TODO(aaron): Analyze what repeated allocations we do have, and get rid of them if possible
Not thread safe! Create one per thread.
Example usage:
constexpr const int M = 9;
constexpr const int N = 5;

// Create a function that computes the residual (a linear residual for this example)
const auto J_MN = sym::RandomNormalMatrix<double, M, N>(gen);
const auto linearize_func = [&J_MN](const sym::Valuesd& values,
                                    sym::SparseLinearizationd* const linearization) {
  const auto state_vec = values.At<sym::Vector5d>('v');
  linearization->residual = J_MN * state_vec;
  linearization->hessian_lower = (J_MN.transpose() * J_MN).sparseView();
  linearization->jacobian = J_MN.sparseView();
  linearization->rhs = J_MN.transpose() * linearization->residual;
};

// Create a Values
sym::Valuesd values_init{};
values_init.Set('v', (StateVector::Ones() * 100).eval());

// Create a Solver
sym::LevenbergMarquardtSolverd solver(params, "", kEpsilon);
solver.SetIndex(values_init.CreateIndex({'v'}));
solver.Reset(values_init);

// Iterate to convergence
sym::optimization_stats_t stats;
bool should_early_exit = false;
while (!should_early_exit) {
  should_early_exit = solver.Iterate(linearize_func, &stats);
}

// Get the best values
sym::Valuesd values_final = solver.GetBestValues();
The theory:
We start with a nonlinear vector-valued error function that defines an error residual for which we want to minimize the squared norm. The residual is dimension M, the state is N.
residual = f(x)
Define a least squares cost function as the squared norm of the residual:
e(x) = 0.5 * ||f(x)||**2 = 0.5 * f(x).T * f(x)
Take the first-order Taylor expansion for x around the linearization point x0:
f(x) = f(x0) + f'(x0) * (x - x0) + ...
Plug in to the cost function to get a quadratic:
e(x) ~= 0.5 * (x - x0).T * f'(x0).T * f'(x0) * (x - x0) + f(x0).T * f'(x0) * (x - x0) + 0.5 * f(x0).T * f(x0)
Take derivative with respect to x:
e'(x) = f'(x0).T * f'(x0) * (x - x0) + f'(x0).T * f(x0)
Set to zero to find the minimum value of the quadratic (paraboloid):
0 = f'(x0).T * f'(x0) * (x - x0) + f'(x0).T * f(x0)
(x - x0) = - inv(f'(x0).T * f'(x0)) * f'(x0).T * f(x0)
x = x0 - inv(f'(x0).T * f'(x0)) * f'(x0).T * f(x0)
Another way to write this is to create some helpful shorthand:
f'(x0) --> jacobian or J (shape = MxN)
f (x0) --> bias or b (shape = Mx1)
x - x0 --> dx (shape = Nx1)
Rederiving the Gauss-Newton solution:
e(x) ~= 0.5 * dx.T * J.T * J * dx + b.T * J * dx + 0.5 * b.T * b
e'(x) = J.T * J * dx + J.T * b
x = x0 - inv(J.T * J) * J.T * b
A couple more common names:
f'(x0).T * f'(x0) = J.T * J --> hessian approximation or H (shape = NxN)
f'(x0).T * f (x0) = J.T * b --> right hand side or rhs (shape = Nx1)
So the iteration loop for optimization is:
J, b = linearize(f, x0)
dx = -inv(J.T * J) * J.T * b
x_new = x0 + dx
Technically what we’ve just described is the Gauss-Newton algorithm; the Levenberg-Marquardt algorithm is based on Gauss-Newton, but adds a term to J.T * J before inverting to make sure the matrix is invertible and make the optimization more stable. This additional term typically takes the form lambda * I, or lambda * diag(J.T * J), where lambda is another parameter updated by the solver at each iteration. Configuration of how this term is computed can be found in the optimizer params.
Public Types
-
using Scalar = ScalarType#
-
using LinearSolver = LinearSolverType#
-
using MatrixType = typename LinearSolverType::MatrixType#
-
using StateType = internal::LevenbergMarquardtState<MatrixType>#
-
using LinearizationType = Linearization<MatrixType>#
-
using FailureReason = levenberg_marquardt_solver_failure_reason_t#
-
using LinearizeFunc = std::function<void(const Values<Scalar>&, LinearizationType&)>#
Public Functions
-
inline LevenbergMarquardtSolver(const optimizer_params_t &p, const std::string &id, const Scalar epsilon)#
-
inline LevenbergMarquardtSolver(const optimizer_params_t &p, const std::string &id, const Scalar epsilon, const LinearSolver &linear_solver)#
-
inline void SetIndex(const index_t &index)#
-
inline const optimizer_params_t &Params() const#
-
void UpdateParams(const optimizer_params_t &p)#
-
optional<std::pair<optimization_status_t, FailureReason>> Iterate(const LinearizeFunc &func, OptimizationStats<MatrixType> &stats)#
-
inline const LinearizationType &GetBestLinearization() const#
-
void ComputeCovariance(const MatrixType &hessian_lower, MatrixX<Scalar> &covariance)#
-
template<typename MatrixType>
struct Linearization#
- #include <linearization.h>
Class for storing a problem linearization evaluated at a Values (i.e. a residual, jacobian, hessian, and rhs)
MatrixType is expected to be an Eigen MatrixX or SparseMatrix.
Public Functions
-
inline void Reset()#
Set to invalid
-
inline bool IsInitialized() const#
Returns whether the linearization is currently valid for the corresponding values. Accessing any of the members when this is false could result in unexpected behavior
-
inline void SetInitialized(const bool initialized = true)#
-
inline Scalar LinearDeltaError(const Vector &x_update, const Vector &damping_vector) const#
Returns the change in error predicted by the Linearization at the given update
- Parameters:
x_update – The update to the values
damping_vector – The vector added to the diagonal of the hessian during the linear solve
-
struct linearized_sparse_factor_t#
Public Members
-
Eigen::VectorXd residual#
-
Eigen::SparseMatrix<double> jacobian#
-
Eigen::SparseMatrix<double> hessian#
-
Eigen::VectorXd rhs#
Public Static Functions
-
static inline constexpr const char *getTypeName()#
-
struct linearized_sparse_factorf_t#
Public Members
-
Eigen::VectorXf residual#
-
Eigen::SparseMatrix<float> jacobian#
-
Eigen::SparseMatrix<float> hessian#
-
Eigen::VectorXf rhs#
Public Static Functions
-
static inline constexpr const char *getTypeName()#
-
template<typename _S>
struct LinearizedDenseFactorTypeHelper#
-
template<typename _S>
struct LinearizedSparseFactorTypeHelper#
-
template<typename ScalarType>
class Linearizer#
- #include <linearizer.h>
Class for evaluating multiple Factors at the linearization point given by a Values.
Stores the original Factors as well as the LinearizedFactors, and provides tools for aggregating keys and building a large jacobian / hessian for optimization.
For efficiency, prefer calling Relinearize() instead of re-constructing this object!
Public Functions
-
Linearizer(const std::string &name, const std::vector<Factor<Scalar>> &factors, const std::vector<Key> &key_order = {}, bool include_jacobians = false, bool debug_checks = false)#
Construct a Linearizer from factors and optional keys
- Parameters:
factors – Only stores a pointer, MUST be in scope for the lifetime of this object!
key_order – If provided, acts as an ordered set of keys that form the state vector to optimize. Can equal the set of all factor keys or a subset of all factor keys. If not provided, it is computed from all keys for all factors using a default ordering.
debug_checks – Whether to perform additional sanity checks for NaNs. This uses additional compute but not additional memory except for logging.
-
void Relinearize(const Values<Scalar> &values, SparseLinearization<Scalar> &linearization)#
Update linearization at a new evaluation point
This is more efficient than reconstructing this object repeatedly. On the first call, it will allocate memory and perform analysis needed for efficient repeated relinearization.
TODO(aaron): This should be const except that it can initialize the object
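A relinearization-loop sketch, mirroring the dense variant:
sym::Linearizer<double> linearizer("my_linearizer", factors);
sym::SparseLinearizationd linearization;
linearizer.Relinearize(values, linearization);  // first call: allocates and analyzes
linearizer.Relinearize(values, linearization);  // later calls: reuse the allocated storage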
-
bool IsInitialized() const#
Whether this contains values, versus having not been evaluated yet
-
const std::vector<LinearizedSparseFactor> &LinearizedSparseFactors() const#
-
template<typename MatrixType>
struct OptimizationStats#
- #include <optimization_stats.h>
Debug stats for a full optimization run
Public Functions
-
inline optimization_stats_t GetLcmType() const#
-
inline auto JacobianView(const optimization_iteration_t &iteration) const#
Get a view of the Jacobian at a particular iteration
The lifetime of the result is tied to the lifetime of the OptimizationStats object
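Usage sketch (assumes the Optimizer was created with debug_stats = true and include_jacobians = true):
const auto stats = optimizer.Optimize(values);
const auto jacobian = stats.JacobianView(stats.iterations.back());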
-
inline void Reset(const size_t num_iterations)#
Reset the optimization stats
Does not cause reallocation, except for things in debug stats
Public Members
-
int32_t best_index = {0}#
Index into iterations of the best iteration (containing the optimal Values)
-
optimization_status_t status = {}#
What was the result of the optimization?
-
int32_t failure_reason = {}#
If status == FAILED, why? This should be cast to the Optimizer::FailureReason enum for the nonlinear solver you used.
-
optional<Linearization<MatrixType>> best_linearization = {}#
The linearization at best_index (at optimized_values), filled out if populate_best_linearization = true
-
sparse_matrix_structure_t jacobian_sparsity = {}#
The sparsity pattern of the problem jacobian
Only filled if Optimizer created with debug_stats = true and include_jacobians = true, otherwise default constructed.
If using a dense linearization, only the shape field will be filled.
-
Eigen::VectorXi linear_solver_ordering = {}#
The permutation used by the linear solver
Only filled if using an Optimizer created with debug_stats = true and a linear solver that exposes Permutation() (such as the default SparseCholeskySolver). Otherwise, will be default constructed.
-
sparse_matrix_structure_t cholesky_factor_sparsity = {}#
The sparsity pattern of the cholesky factor L
Only filled if using an Optimizer created with debug_stats = true and a linear solver that exposes L() (such as the default SparseCholeskySolver). Otherwise, will be default constructed.
-
template<typename ScalarType, typename NonlinearSolverType = LevenbergMarquardtSolver<ScalarType>>
class Optimizer#
- #include <optimizer.h>
Class for optimizing a nonlinear least-squares problem specified as a list of Factors. For efficient use, create once and call Optimize() multiple times with different initial guesses, as long as the factors remain constant and the structure of the Values is identical.
Not thread safe! Create one per thread.
Example usage:
// Create a Values
sym::Key key0{'R', 0};
sym::Key key1{'R', 1};
sym::Valuesd values;
values.Set(key0, sym::Rot3d::Identity());
values.Set(key1, sym::Rot3d::Identity());

// Create some factors
std::vector<sym::Factord> factors;
factors.push_back(sym::Factord::Jacobian(
    [epsilon](const sym::Rot3d& rot, Eigen::Vector3d* const res, Eigen::Matrix3d* const jac) {
      const sym::Rot3d prior = sym::Rot3d::Random();
      const Eigen::Matrix3d sqrt_info = Eigen::Vector3d::Ones().asDiagonal();
      sym::PriorFactorRot3<double>(rot, prior, sqrt_info, epsilon, res, jac);
    },
    {key0}));
factors.push_back(sym::Factord::Jacobian(
    [epsilon](const sym::Rot3d& rot, Eigen::Vector3d* const res, Eigen::Matrix3d* const jac) {
      const sym::Rot3d prior = sym::Rot3d::Random();
      const Eigen::Matrix3d sqrt_info = Eigen::Vector3d::Ones().asDiagonal();
      sym::PriorFactorRot3<double>(rot, prior, sqrt_info, epsilon, res, jac);
    },
    {key1}));
factors.push_back(sym::Factord::Jacobian(
    [epsilon](const sym::Rot3d& a, const sym::Rot3d& b, Eigen::Vector3d* const res,
              Eigen::Matrix<double, 3, 6>* const jac) {
      const Eigen::Matrix3d sqrt_info = Eigen::Vector3d::Ones().asDiagonal();
      const sym::Rot3d a_T_b = sym::Rot3d::Random();
      sym::BetweenFactorRot3<double>(a, b, a_T_b, sqrt_info, epsilon, res, jac);
    },
    {key0, key1}));

// Set up the params
sym::optimizer_params_t params = DefaultLmParams();
params.iterations = 50;
params.early_exit_min_reduction = 0.0001;

// Optimize
sym::Optimizer<double> optimizer(params, factors, epsilon);
optimizer.Optimize(values);
See symforce/test/symforce_optimizer_test.cc for more examples
Public Types
-
using Scalar = ScalarType#
-
using NonlinearSolver = NonlinearSolverType#
-
using FailureReason = typename NonlinearSolver::FailureReason#
-
using MatrixType = typename NonlinearSolver::MatrixType#
-
using Stats = OptimizationStats<MatrixType>#
-
using LinearizerType = internal::LinearizerSelector_t<MatrixType>#
Public Functions
-
Optimizer(const optimizer_params_t &params, std::vector<Factor<Scalar>> factors, const std::string &name = "sym::Optimize", std::vector<Key> keys = {}, const Scalar epsilon = sym::kDefaultEpsilon<Scalar>)#
Base constructor
- Parameters:
params – The params to use for the optimizer and nonlinear solver
factors – The set of factors to include
name – The name of this optimizer to be used for printing debug information
keys – The set of keys to optimize. If empty, will use all optimized keys touched by the factors
epsilon – Epsilon for numerical stability
-
Optimizer(const optimizer_params_t &params, std::vector<Factor<Scalar>> factors, const Scalar epsilon, const std::string &name, std::vector<Key> keys, bool debug_stats, bool check_derivatives = false, bool include_jacobians = false)#
-
template<typename ...NonlinearSolverArgs>
Optimizer(const optimizer_params_t &params, std::vector<Factor<Scalar>> factors, const std::string &name, std::vector<Key> keys, Scalar epsilon, NonlinearSolverArgs&&... nonlinear_solver_args)#
Constructor that copies in factors and keys, with arguments for the nonlinear solver
See the base constructor for argument descriptions; additional arguments are forwarded to the constructor for NonlinearSolverType
-
template<typename ...NonlinearSolverArgs>
Optimizer(const optimizer_params_t &params, std::vector<Factor<Scalar>> factors, Scalar epsilon, const std::string &name, std::vector<Key> keys, bool debug_stats, bool check_derivatives, bool include_jacobians, NonlinearSolverArgs&&... nonlinear_solver_args)#
-
virtual ~Optimizer() = default#
-
Stats Optimize(Values<Scalar> &values, int num_iterations = -1, bool populate_best_linearization = false)#
Optimize the given values in-place
- Parameters:
num_iterations – If < 0 (the default), uses the number of iterations specified by the params at construction
populate_best_linearization – If true, the linearization at the best values will be filled out in the stats
- Returns:
The optimization stats
-
virtual void Optimize(Values<Scalar> &values, int num_iterations, bool populate_best_linearization, Stats &stats)#
Optimize the given values in-place
This overload takes the stats as an argument and stores the results there. This allows users to avoid reallocating memory for any of the entries in the stats, for use cases where that’s important.
- Parameters:
num_iterations – If < 0 (the default), uses the number of iterations specified by the params at construction
populate_best_linearization – If true, the linearization at the best values will be filled out in the stats
stats – An OptimizationStats to fill out with the result - if filling out dynamically allocated fields here, will not reallocate if memory is already allocated in the required shape (e.g. for repeated calls to Optimize())
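For repeated solves, a minimal sketch (not from the library docs) of reusing one Stats object, assuming params, factors, and a sequence of initial guesses are set up as in the class example above:
sym::Optimizer<double> optimizer(params, factors);
sym::Optimizer<double>::Stats stats;
for (sym::Valuesd& values : initial_guesses) {
  // Reuses stats' dynamically allocated fields instead of reallocating.
  optimizer.Optimize(values, /* num_iterations = */ -1,
                     /* populate_best_linearization = */ false, stats);
  // Inspect stats.status and stats.best_index before the next solve.
}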
-
void Optimize(Values<Scalar> &values, int num_iterations, Stats &stats)#
Optimize the given values in-place
This overload takes the stats as an argument and stores the results there. This allows users to avoid reallocating memory for any of the entries in the stats, for use cases where that’s important.
- Parameters:
num_iterations – If < 0 (the default), uses the number of iterations specified by the params at construction
stats – An OptimizationStats to fill out with the result - if filling out dynamically allocated fields here, will not reallocate if memory is already allocated in the required shape (e.g. for repeated calls to Optimize())
-
void Optimize(Values<Scalar> &values, Stats &stats)#
Optimize the given values in-place
This overload takes the stats as an argument and stores the results there. This allows users to avoid reallocating memory for any of the entries in the stats, for use cases where that’s important.
- Parameters:
stats – An OptimizationStats to fill out with the result - if filling out dynamically allocated fields here, will not reallocate if memory is already allocated in the required shape (e.g. for repeated calls to Optimize())
-
Linearization<MatrixType> Linearize(const Values<Scalar> &values)#
Linearize the problem around the given values
-
void ComputeAllCovariances(const Linearization<MatrixType> &linearization, std::unordered_map<Key, MatrixX<Scalar>> &covariances_by_key)#
Get covariances for each optimized key at the given linearization
Will reuse entries in covariances_by_key, allocating new entries so that the result contains exactly the set of keys optimized by this Optimizer. covariances_by_key must not contain any keys that are not optimized by this Optimizer.
May not be called before either Optimize() or Linearize() has been called.
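A minimal sketch (assuming the optimizer and values from the example above, after a solve):
// Covariances for every optimized key at the current linearization.
const auto linearization = optimizer.Linearize(values);
std::unordered_map<sym::Key, sym::MatrixX<double>> covariances_by_key;
optimizer.ComputeAllCovariances(linearization, covariances_by_key);
// covariances_by_key now contains exactly the optimized keys.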
-
void ComputeCovariances(const Linearization<MatrixType> &linearization, const std::vector<Key> &keys, std::unordered_map<Key, MatrixX<Scalar>> &covariances_by_key)#
Get covariances for the given subset of keys at the given linearization
This version is potentially much more efficient than computing the covariances for all keys in the problem.
Currently requires that keys corresponds to a set of keys at the start of the list of keys for the full problem, and in the same order. It uses the Schur complement trick, so will be most efficient if the Hessian is of the following form, with C block diagonal:

A = ( B    E )
    ( E^T  C )

Will reuse entries in covariances_by_key, allocating new entries so that the result contains exactly the set of keys requested. covariances_by_key must not contain any keys that are not in keys.
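Continuing the sketch above, restricted to a subset that forms a prefix of the full problem key order (here key0 from the class example):
// Covariance for a prefix subset of the problem keys only.
std::unordered_map<sym::Key, sym::MatrixX<double>> subset_covariances;
optimizer.ComputeCovariances(linearization, {key0}, subset_covariances);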
-
void ComputeFullCovariance(const Linearization<MatrixType> &linearization, MatrixX<Scalar> &covariance)#
Get the full problem covariance at the given linearization
Unlike ComputeCovariances and ComputeAllCovariances, this includes the off-diagonal blocks, i.e. the cross-covariances between different keys.
The ordering of entries here is the same as the ordering of the keys in the linearization, which can be accessed via optimizer.Linearizer().StateIndex().
May not be called before either Optimize() or Linearize() has been called.
- Parameters:
covariance – A matrix that will be filled out with the full problem covariance.
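Continuing the same sketch:
// Dense full-problem covariance, including cross-covariances; block order
// matches optimizer.Linearizer().StateIndex().
sym::MatrixX<double> covariance;
optimizer.ComputeFullCovariance(linearization, covariance);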
-
const LinearizerType &Linearizer() const#
Get the Linearizer object
-
LinearizerType &Linearizer()#
-
void UpdateParams(const optimizer_params_t &params)#
Update the optimizer params
-
const optimizer_params_t &Params() const#
Get the params used by the nonlinear solver
-
template<typename Scalar>
struct PreintegratedImuMeasurements#
#include <preintegrated_imu_measurements.h>
Struct of Preintegrated IMU Measurements (not including the covariance of change in orientation, velocity, and position).
Public Functions
-
PreintegratedImuMeasurements(const Vector3 &accel_bias, const Vector3 &gyro_bias)#
Initialize the struct with accel_bias and gyro_bias; all other values are zeroed out (scalars, vectors, and matrices) or set to the identity (DR).
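A minimal construction sketch (the bias values here are illustrative placeholders):
// Start a new preintegration window from the current bias estimates.
const Eigen::Vector3d accel_bias = Eigen::Vector3d::Zero();
const Eigen::Vector3d gyro_bias = Eigen::Vector3d::Zero();
sym::PreintegratedImuMeasurementsd pim(accel_bias, gyro_bias);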
-
imu_integrated_measurement_t GetLcmType() const#
Public Static Functions
-
static PreintegratedImuMeasurements<Scalar> FromLcm(const imu_integrated_measurement_t &msg)#
-
template<class T>
struct remove_cvref
-
template<typename _MatrixType, int _UpLo = Eigen::Lower>
class SparseCholeskySolver
#include <sparse_cholesky_solver.h>
Efficiently solves A * x = b, where A is a sparse matrix and b is a dense vector or matrix, using the LDLT Cholesky factorization A = L * D * L^T, where L is a unit triangular matrix and D is a diagonal matrix.
When repeatedly solving systems where A changes but its sparsity pattern remains identical, this class can analyze the sparsity pattern once and use it to more efficiently factorize and solve on subsequent calls.
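A usage sketch of the repeated-solve pattern (A_first, As, and b are placeholders; every matrix in As must share A_first's sparsity pattern):
// Construct with a representative matrix to analyze its sparsity once,
// then factorize and solve repeatedly as the numeric values change.
sym::SparseCholeskySolver<Eigen::SparseMatrix<double>> solver(A_first);
for (const Eigen::SparseMatrix<double>& A : As) {
  solver.Factorize(A);                        // numeric factorization only
  const Eigen::VectorXd x = solver.Solve(b);  // x solves A * x = b
}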
Public Types
-
enum [anonymous]
Values:
-
enumerator UpLo
-
using MatrixType = _MatrixType
-
using Scalar = typename MatrixType::Scalar
-
using StorageIndex = typename MatrixType::StorageIndex
-
using CholMatrixType = Eigen::SparseMatrix<Scalar, Eigen::ColMajor, StorageIndex>
-
using VectorType = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
-
using RhsType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
-
using PermutationMatrixType = Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, StorageIndex>
-
using Ordering = std::function<void(const MatrixType&, PermutationMatrixType&)>
Public Functions
-
inline SparseCholeskySolver(const Ordering &ordering = Eigen::MetisOrdering<StorageIndex>())
Default constructor
- Parameters:
ordering – Functor to compute the variable ordering to use. Can be any functor with signature void(const MatrixType&, PermutationMatrixType&) which takes in the sparsity pattern of the matrix A and fills out the permutation of variables to use in the second argument. The first argument is the full matrix A, not just the upper or lower triangle; the values may not be the same as in A, but will be nonzero for entries in A that are nonzero. Typically this will be an instance of one of the orderings provided by Eigen, such as Eigen::NaturalOrdering().
-
inline explicit SparseCholeskySolver(const MatrixType &A, const Ordering &ordering = Eigen::MetisOrdering<StorageIndex>())
Construct with a representative sparse matrix
- Parameters:
A – The matrix to be factorized
ordering – Functor to compute the variable ordering to use. Can be any functor with signature void(const MatrixType&, PermutationMatrixType&) which takes in the sparsity pattern of the matrix A and fills out the permutation of variables to use in the second argument. The first argument is the full matrix A, not just the upper or lower triangle; the values may not be the same as in A, but will be nonzero for entries in A that are nonzero. Typically this will be an instance of one of the orderings provided by Eigen, such as Eigen::NaturalOrdering().
-
inline ~SparseCholeskySolver()
-
inline bool IsInitialized() const
Whether we have computed a symbolic sparsity and are ready to factorize/solve.
-
void ComputePermutationMatrix(const MatrixType &A)
Compute an efficient permutation matrix (ordering) for A and store internally.
-
void ComputeSymbolicSparsity(const MatrixType &A)
Compute symbolic sparsity pattern for A and store internally.
-
bool Factorize(const MatrixType &A)
Decompose A into A = L * D * L^T and store internally. A must have the same sparsity as the matrix used for construction. Returns true if factorization was successful, and false otherwise. NOTE(brad): Currently always returns true.
-
template<typename Rhs>
RhsType Solve(const Eigen::MatrixBase<Rhs> &b) const
Returns x for A x = b, where x and b are dense.
-
template<typename Rhs>
void SolveInPlace(Eigen::MatrixBase<Rhs> &b) const
Solves in place for x in A x = b, where x and b are dense.
-
inline const CholMatrixType &L() const
-
inline const VectorType &D() const
-
inline const PermutationMatrixType &Permutation() const
-
inline const PermutationMatrixType &InversePermutation() const
-
inline void AnalyzeSparsityPattern(const Eigen::SparseMatrix<Scalar> &matrix)
-
template<typename _MatrixType>
class SparseSchurSolver
#include <sparse_schur_solver.h>
A solver for factorizing and solving positive definite matrices with a large block-diagonal component
This includes matrices typically encountered in SfM, where the landmarks-to-landmarks block of the Hessian is block diagonal (there are no cross-terms between different landmarks)
We should have a matrix A of structure

A = ( B    E )
    ( E^T  C )

with C in R^{C_dim x C_dim} block diagonal.
The full problem is A x = b, which we can break down the same way as

( B    E ) ( y )   ( v )
( E^T  C ) ( z ) = ( w )

We then have two equations:

B y + E z = v
E^T y + C z = w

C is block diagonal and therefore easy to invert, so we can write

z = C^{-1} (w - E^T y)

Plugging this into the first equation, we can eliminate z:

B y + E C^{-1} (w - E^T y) = v
=> B y + E C^{-1} w - E C^{-1} E^T y = v
=> (B - E C^{-1} E^T) y = v - E C^{-1} w

Defining the Schur complement S = B - E C^{-1} E^T, we have

S y = v - E C^{-1} w

So, we can form S and use the above equation to solve for y. Once we have y, we can use the equation for z above to solve for z, and we’re done.
See http://ceres-solver.org/nnls_solving.html#dense-schur-sparse-schur
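A usage sketch under the structure above (A, rhs, and C_dim are placeholders; C_dim is the dimension of the block-diagonal C block, e.g. 3 * num_landmarks in SfM):
// Exploit the block-diagonal C block via the Schur complement.
sym::SparseSchurSolver<Eigen::SparseMatrix<double>> solver;
solver.ComputeSymbolicSparsity(A, C_dim);     // once per sparsity pattern
solver.Factorize(A);                          // per numeric update
const Eigen::MatrixXd x = solver.Solve(rhs);  // x solves A * x = rhs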
Public Types
-
using MatrixType = _MatrixType
-
using Scalar = typename MatrixType::Scalar
-
using SMatrixSolverType = SparseCholeskySolver<MatrixType, Eigen::Lower>
-
using MatrixX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
-
using VectorX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>
Public Functions
-
inline SparseSchurSolver(const typename SMatrixSolverType::Ordering &ordering = Eigen::MetisOrdering<typename SMatrixSolverType::StorageIndex>())
-
inline bool IsInitialized() const
-
void ComputeSymbolicSparsity(const MatrixType &A, int C_dim)
-
void Factorize(const MatrixType &A)
-
template<typename RhsType>
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Solve(const Eigen::MatrixBase<RhsType> &rhs) const
-
void SInvInPlace(Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &x_and_rhs) const
-
template<typename Scalar>
class Values
#include <values.h>
Efficient polymorphic data structure to store named types with a dict-like interface and support efficient repeated operations using a key index. Supports on-manifold optimization.
Compatible types are given by the type_t enum. All types implement the StorageOps and LieGroupOps concepts, which are the core operating mechanisms in this class.
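A brief sketch of the dict-like interface:
sym::Valuesd values;
values.Set({'R', 0}, sym::Rot3d::Identity());  // non-Eigen overload
const Eigen::Vector3d t = Eigen::Vector3d::Zero();
values.Set({'t', 0}, t);                       // Eigen overload
if (values.Has({'R', 0})) {
  const sym::Rot3d rot = values.At<sym::Rot3d>({'R', 0});
}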
Public Types
-
using LcmType = typename ValuesLcmTypeHelper<Scalar>::Type
Expose the correct LCM type (values_t or valuesf_t)
Public Functions
-
Values() = default
Default construct as empty.
-
explicit Values(std::initializer_list<Values<Scalar>> others)
Construct from a list of other Values objects. The order of Keys is preserved by the order of the Values in the initializer list.
NOTE(alvin): others Values should not contain overlapping Keys
-
explicit Values(const LcmType &msg)
Construct from serialized form.
-
bool Has(const Key &key) const
Return whether the key exists.
-
template<typename T>
std::enable_if_t<!kIsEigenType<T>, bool> Set(const Key &key, const T &value)
Add or update a value by key. Returns true if added, false if updated.
Overload for non-Eigen types
-
template<typename Derived>
std::enable_if_t<kIsEigenType<Derived>, bool> Set(const Key &key, const Derived &value)
Add or update a value by key. Returns true if added, false if updated.
Overload for Eigen types
-
template<typename T>
void SetNew(const Key &key, T &&value)
Add a value by key, throws runtime_error if the key already exists.
This is useful when setting up initial values for a problem.
-
void UpdateOrSet(const index_t &index, const Values<Scalar> &other)
Update or add keys to this Values based on other Values of different structure. index MUST be valid for other.
NOTE(alvin): It is less efficient than the Update methods below if index objects are created and cached; this method performs a map lookup for each key of the index.
-
size_t NumEntries() const
Number of keys.
-
inline bool Empty() const
Has zero keys.
-
std::vector<Key> Keys(bool sort_by_offset = true) const
Get all keys.
NOTE(hayk): If we changed key storage to a sorted vector this could automatically be maintained and it would be more embedded friendly, but At(key) would become O(N) for linear search.
- Parameters:
sort_by_offset – Sorts by storage order to make iteration safer and more memory efficient
-
const MapType &Items() const
Expose map type to allow iteration.
-
const ArrayType &Data() const
Raw data buffer.
-
template<typename NewScalar>
Values<NewScalar> Cast() const
Cast to another Scalar type (returns a copy)
-
bool Remove(const Key &key)
Remove the given key.
Only removes the index entry, does not change the data array. Returns true if removed, false if already not present.
Call Cleanup() to re-pack the data array.
-
void RemoveAll()
Remove all keys and empty out the storage.
-
size_t Cleanup()
Repack the data array to get rid of empty space from removed keys.
If regularly removing keys, it’s up to the user to call this appropriately to avoid storage growth. Returns the number of Scalar elements cleaned up from the data array.
It will INVALIDATE all indices, offset increments, and pointers. Re-create an index with CreateIndex().
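A sketch of the remove-then-repack workflow (continuing the values above):
values.Remove({'t', 0});                // index entry removed, data untouched
const size_t freed = values.Cleanup();  // repack; prior indices now invalid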
-
index_t CreateIndex(const std::vector<Key> &keys) const
Create an index from the given ordered subset of keys. This object can then be used for repeated efficient operations on that subset of keys.
If you want an index of all the keys, call values.CreateIndex(values.Keys()).
An index will be INVALIDATED if the following happens:
1) Remove() is called with a contained key, or RemoveAll() is called
2) Cleanup() is called to re-pack the data array
NOTE(hayk): We could also add a simple UpdateIndex(&index) method, since the offset is the only thing that needs to get updated after repacking.
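A sketch of caching an index for repeated lookup-free operations (other_values, delta, and epsilon are placeholders; other_values must have identical structure, and delta must have index.tangent_dim entries):
const sym::index_t index = values.CreateIndex(values.Keys());
values.Update(index, other_values);            // copies data slices, no map lookups
values.Retract(index, delta.data(), epsilon);  // on-manifold update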
-
index_entry_t IndexEntryAt(const Key &key) const
Retrieve an index entry by key. This performs a map lookup.
An index entry will be INVALIDATED if the following happens:
1) Remove() is called with the indexed key, or RemoveAll() is called
2) Cleanup() is called to re-pack the data array
-
optional<index_entry_t> MaybeIndexEntryAt(const Key &key) const
Retrieve an index entry by key, or the empty optional if the key is not present. This performs a map lookup.
An index entry will be INVALIDATED if the following happens:
1) Remove() is called with the indexed key, or RemoveAll() is called
2) Cleanup() is called to re-pack the data array
-
template<typename T>
T At(const index_entry_t &entry) const
Retrieve a value by index entry. This avoids a map lookup compared to At(key).
-
template<typename T>
std::enable_if_t<!kIsEigenType<T>> Set(const index_entry_t &key, const T &value)
Update a value by index entry with no map lookup (compared to Set(key)). This does NOT add new values and assumes the key exists already.
Overload for non-Eigen types
-
template<typename Derived>
std::enable_if_t<kIsEigenType<Derived>> Set(const index_entry_t &key, const Derived &value)
Update a value by index entry with no map lookup (compared to Set(key)). This does NOT add new values and assumes the key exists already.
Overload for Eigen types
-
void Update(const index_t &index, const Values<Scalar> &other)
Efficiently update the keys given by this index from other into this. This purely copies slices of the data arrays, the index MUST be valid for both objects!
-
void Update(const index_t &index_this, const index_t &index_other, const Values<Scalar> &other)
Efficiently update the keys from a different structured Values, given by index_this and index_other. This purely copies slices of the data arrays. index_this MUST be valid for this object; index_other MUST be valid for the other object.
-
void Retract(const index_t &index, const Scalar *delta, Scalar epsilon)
Perform a retraction from an update vector.
- Parameters:
index – Ordered list of keys in the delta vector
delta – Pointer to update vector - MUST be the size of index.tangent_dim!
epsilon – Small constant to avoid singularities (do not use zero)
-
VectorX<Scalar> LocalCoordinates(const Values<Scalar> &others, const index_t &index, Scalar epsilon)
Express this Values in the local coordinates of the others Values, i.e., this \ominus others
-
void FillLcmType(LcmType &msg, bool sort_keys = false) const
Serialize to LCM.
-
LcmType GetLcmType(bool sort_keys = false) const
-
template<typename _S>
struct ValuesLcmTypeHelper
-
template<>
struct ValuesLcmTypeHelper<double>
Public Types
-
using Type = values_t
-
template<>
struct ValuesLcmTypeHelper<float>
Public Types
-
using Type = valuesf_t
-
template<typename Scalar>