// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include <list>

#include "ceres/internal/eigen.h"
#include "ceres/low_rank_inverse_hessian.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

// The (L)BFGS algorithm explicitly requires that the secant equation:
//
//   B_{k+1} * s_k = y_k
//
// is satisfied at each iteration, where B_{k+1} is the approximated
// Hessian at the (k+1)-th iteration, s_k = (x_{k+1} - x_{k}) and
// y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
// positive definite, this is equivalent to the condition:
//
//   s_k^T * y_k > 0   [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
//
// This condition would always be satisfied if the function were strictly
// convex; alternatively, it is always satisfied provided that a Wolfe line
// search is used (even if the function is not strictly convex). See [1]
// (p138) for a proof.
//
// Although Ceres will always use a Wolfe line search when using (L)BFGS,
// practical implementation considerations mean that the line search
// may return a point that satisfies only the Armijo condition, and thus
// could violate the secant equation. As such, we will only use a step
// to update the Hessian approximation if:
//
//   s_k^T * y_k > tolerance
//
// It is important that the tolerance is very small (and >= 0), as otherwise
// we might skip the update too often and fail to capture important curvature
// information in the Hessian. For example, going from 1e-10 -> 1e-14 improves
// the NIST benchmark score from 43/54 to 53/54.
//
// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed., Springer, 2006.
//
// TODO(alexs.mac): Consider using a damped BFGS update instead of
// skipping the update.
const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14;
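// For illustration (values chosen arbitrarily, not taken from the NIST
// benchmark mentioned above): with s_k = (1e-8, 0) and y_k = (1e-7, 0),
// the curvature s_k^T * y_k = 1e-15 falls below this tolerance and the
// update is skipped; with s_k = (1e-6, 0) it is 1e-13 and the update is
// applied.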

LowRankInverseHessian::LowRankInverseHessian(
    int num_parameters,
    int max_num_corrections,
    bool use_approximate_eigenvalue_scaling)
    : num_parameters_(num_parameters),
      max_num_corrections_(max_num_corrections),
      use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
      approximate_eigenvalue_scale_(1.0),
      delta_x_history_(num_parameters, max_num_corrections),
      delta_gradient_history_(num_parameters, max_num_corrections),
      delta_x_dot_delta_gradient_(max_num_corrections) {
}

bool LowRankInverseHessian::Update(const Vector& delta_x,
                                   const Vector& delta_gradient) {
  const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
  if (delta_x_dot_delta_gradient <=
      kLBFGSSecantConditionHessianUpdateTolerance) {
    VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
            << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
            << kLBFGSSecantConditionHessianUpdateTolerance
            << " (Secant condition).";
    return false;
  }

  int next = indices_.size();
  // Once the size of the list reaches max_num_corrections_, simulate
  // a circular buffer by removing the first element of the list and
  // making it the next position where the LBFGS history is stored.
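  // For example, with max_num_corrections_ = 3, indices_ evolves over
  // successive updates as:
  //   {0} -> {0,1} -> {0,1,2} -> {1,2,0} -> {2,0,1} -> {0,1,2} -> ...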
  if (next == max_num_corrections_) {
    next = indices_.front();
    indices_.pop_front();
  }

  indices_.push_back(next);
  delta_x_history_.col(next) = delta_x;
  delta_gradient_history_.col(next) = delta_gradient;
  delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
  approximate_eigenvalue_scale_ =
      delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
  return true;
}

void LowRankInverseHessian::RightMultiply(const double* x_ptr,
                                          double* y_ptr) const {
  ConstVectorRef gradient(x_ptr, num_parameters_);
  VectorRef search_direction(y_ptr, num_parameters_);

  search_direction = gradient;

  const int num_corrections = indices_.size();
  Vector alpha(num_corrections);

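  // Backward pass of the L-BFGS two-loop recursion (see e.g. Nocedal &
  // Wright, Numerical Optimization): walk the correction history from the
  // most recent entry to the oldest, saving the alpha coefficients for the
  // forward pass below.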
  for (std::list<int>::const_reverse_iterator it = indices_.rbegin();
       it != indices_.rend();
       ++it) {
    const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
        delta_x_dot_delta_gradient_(*it);
    search_direction -= alpha_i * delta_gradient_history_.col(*it);
    alpha(*it) = alpha_i;
  }

  if (use_approximate_eigenvalue_scaling_) {
    // Rescale the initial inverse Hessian approximation (H_0) to be
    // iteratively updated so that it is of similar 'size' to the true
    // inverse Hessian along the most recent search direction. As shown
    // in [1]:
    //
    //   \gamma_k = (delta_gradient_{k-1}' * delta_x_{k-1}) /
    //              (delta_gradient_{k-1}' * delta_gradient_{k-1})
    //
    // satisfies:
    //
    //   (1 / \lambda_m) <= \gamma_k <= (1 / \lambda_1)
    //
    // where \lambda_1 & \lambda_m are the smallest and largest eigenvalues
    // of the true Hessian (not the inverse) along the most recent search
    // direction respectively. Thus \gamma is an approximate eigenvalue of
    // the true inverse Hessian, and choosing H_0 = I * \gamma will yield a
    // starting point that has a similar scale to the true inverse Hessian.
    // This technique is widely reported to often improve convergence;
    // however, this is not universally true, particularly if there are
    // errors in the initial Jacobians, or if there are significant
    // differences in the sensitivity of the problem to the parameters
    // (i.e. the range of the magnitudes of the components of the gradient
    // is large).
    //
    // The origin of this rescaling trick is somewhat unclear; the earliest
    // reference appears to be Oren [1], however it is widely discussed
    // without specific attribution in various texts including [2]
    // (p143/178).
    //
    // [1] Oren S.S., Self-scaling variable metric (SSVM) algorithms
    //     Part II: Implementation and experiments, Management Science,
    //     20(5), 863-874, 1974.
    // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
    search_direction *= approximate_eigenvalue_scale_;

    VLOG(4) << "Applying approximate_eigenvalue_scale: "
            << approximate_eigenvalue_scale_ << " to initial inverse Hessian "
            << "approximation.";
  }

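  // Forward pass of the two-loop recursion: walk the correction history from
  // the oldest entry to the newest, applying the correction terms using the
  // alpha coefficients saved during the backward pass.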
  for (std::list<int>::const_iterator it = indices_.begin();
       it != indices_.end();
       ++it) {
    const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
        delta_x_dot_delta_gradient_(*it);
    search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
  }
}
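
// A minimal usage sketch (illustrative only; x, x_next, gradient and
// gradient_next are hypothetical Vectors from an enclosing L-BFGS
// iteration, and the constructor arguments are example values):
//
//   LowRankInverseHessian inverse_hessian(
//       num_parameters,
//       /* max_num_corrections = */ 20,
//       /* use_approximate_eigenvalue_scaling = */ true);
//   ...
//   // After each accepted line search step:
//   inverse_hessian.Update(x_next - x, gradient_next - gradient);
//   // Compute the quasi-Newton direction d = -H * g by applying the
//   // inverse Hessian approximation to the gradient and negating:
//   Vector search_direction(num_parameters);
//   inverse_hessian.RightMultiply(gradient_next.data(),
//                                 search_direction.data());
//   search_direction *= -1.0;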

}  // namespace internal
}  // namespace ceres