// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_MINRES_H_
#define EIGEN_MINRES_H_


namespace Eigen {
    
    namespace internal {
        
        /** \internal Low-level MINRES algorithm
         * \param mat The matrix A
         * \param rhs The right hand side vector b
         * \param x On input an initial solution, on output the computed solution.
         * \param precond A right preconditioner able to efficiently solve for an
         *                approximation of Ax=b (regardless of b)
         * \param iters On input the maximum number of iterations, on output the number of performed iterations.
         * \param tol_error On input the tolerance error, on output an estimation of the relative error.
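         *
         * A minimal illustrative call (hypothetical setup: \c A, \c b and \c x are assumed to be
         * compatible Eigen objects and \c M a positive definite preconditioner):
         * \code
         * int iters = 100;     // on input: maximum number of iterations
         * double tol = 1e-10;  // on input: requested relative tolerance
         * internal::minres(A, b, x, M, iters, tol);
         * // iters now holds the performed iterations, tol the estimated relative error
         * \endcode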
         */
        template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
        EIGEN_DONT_INLINE
        void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
                    const Preconditioner& precond, int& iters,
                    typename Dest::RealScalar& tol_error)
        {
            using std::sqrt;
            typedef typename Dest::RealScalar RealScalar;
            typedef typename Dest::Scalar Scalar;
            typedef Matrix<Scalar,Dynamic,1> VectorType;
            
            // initialize
            const int maxIters(iters);  // initialize maxIters to iters
            const int N(mat.cols());    // the size of the matrix
            const RealScalar rhsNorm2(rhs.squaredNorm());
            const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
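            // Convergence is declared when residualNorm2 falls below threshold2, i.e. when the
            // estimated relative residual ||r||/||b|| drops below tol_error.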
            
            // Initialize preconditioned Lanczos
//            VectorType v_old(N); // will be initialized inside loop
            VectorType v( VectorType::Zero(N) ); //initialize v
            VectorType v_new(rhs-mat*x); //initialize v_new
            RealScalar residualNorm2(v_new.squaredNorm());
//            VectorType w(N); // will be initialized inside loop
            VectorType w_new(precond.solve(v_new)); // initialize w_new
//            RealScalar beta; // will be initialized inside loop
            RealScalar beta_new2(v_new.dot(w_new));
            eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
            RealScalar beta_new(sqrt(beta_new2));
            const RealScalar beta_one(beta_new);
            v_new /= beta_new;
            w_new /= beta_new;
            // Initialize other variables
            RealScalar c(1.0); // the cosine of the Givens rotation
            RealScalar c_old(1.0);
            RealScalar s(0.0); // the sine of the Givens rotation
            RealScalar s_old(0.0); // the sine of the previous Givens rotation
//            VectorType p_oold(N); // will be initialized in loop
            VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
            VectorType p(p_old); // initialize p=0
            RealScalar eta(1.0);
            
            iters = 0; // reset iters
            while ( iters < maxIters ){
                
                // Preconditioned Lanczos
                /* Note that there are 4 variants on the Lanczos algorithm. These are
                 * described in Paige, C. C. (1972). Computational variants of
                 * the Lanczos method for the eigenproblem. IMA Journal of Applied
                 * Mathematics, 10(3), 373–381. The current implementation corresponds
                 * to the case A(2,7) in the paper. It also corresponds to
                 * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
                 * Systems, 2003 p.173. For the preconditioned version see
                 * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
                 */
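                /* In terms of the preconditioner M (so that w_k = M^{-1} v_k), each pass of this
                 * loop advances the three-term recurrence
                 *   beta_{k+1} v_{k+1} = A w_k - alpha_k v_k - beta_k v_{k-1},
                 * with alpha_k = (A w_k - beta_k v_{k-1})^T w_k and
                 * beta_{k+1} = sqrt(t^T M^{-1} t), where t denotes the unnormalized v_{k+1}.
                 */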
                const RealScalar beta(beta_new);
//                v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
                const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
                v = v_new;   // update
//                w = w_new; // update
                const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
                v_new.noalias() = mat*w - beta*v_old;  // compute v_new
                const RealScalar alpha = v_new.dot(w);
                v_new -= alpha*v;              // overwrite v_new
                w_new = precond.solve(v_new);  // overwrite w_new
                beta_new2 = v_new.dot(w_new);  // compute beta_new
                eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
                beta_new = sqrt(beta_new2);    // compute beta_new
                v_new /= beta_new;             // overwrite v_new for next iteration
                w_new /= beta_new;             // overwrite w_new for next iteration
                
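                /* The Givens rotations below maintain a QR factorization of the tridiagonal
                 * Lanczos matrix: the two previous rotations are applied to its new column
                 * (beta, alpha, beta_new)^T, and a new rotation (c,s) is chosen to annihilate
                 * beta_new, yielding the entries (r3, r2, r1) of the last column of R.
                 */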
                // Givens rotation
                const RealScalar r2 = s*alpha + c*c_old*beta;   // s, s_old, c and c_old are still from previous iteration
                const RealScalar r3 = s_old*beta;               // s, s_old, c and c_old are still from previous iteration
                const RealScalar r1_hat = c*alpha - c_old*s*beta;
                const RealScalar r1 = sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
                c_old = c;        // store for next iteration
                s_old = s;        // store for next iteration
                c = r1_hat/r1;    // new cosine
                s = beta_new/r1;  // new sine
                
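                /* Update of the iterate and of the residual norm estimate: the new direction p
                 * follows from back-substitution with the column (r3, r2, r1) of R computed above,
                 * and the residual norm obeys the recurrence ||r_k|| = |s| * ||r_{k-1}||.
                 */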
                // Update solution
//                p_oold = p_old;
                const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
                p_old = p;
                p.noalias() = (w - r2*p_old - r3*p_oold) / r1; // IS NOALIAS REQUIRED?
                x += beta_one*c*eta*p;
                residualNorm2 *= s*s;
                
                if ( residualNorm2 < threshold2){
                    break;
                }
                
                eta = -s*eta;  // update eta
                iters++;       // increment iteration number (for output purposes)
            }
            tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger
        }
        
    }
    
    template< typename _MatrixType, int _UpLo=Lower,
              typename _Preconditioner = IdentityPreconditioner>
//            typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
    class MINRES;
    
    namespace internal {
        
        template< typename _MatrixType, int _UpLo, typename _Preconditioner>
        struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
        {
            typedef _MatrixType MatrixType;
            typedef _Preconditioner Preconditioner;
        };
        
    }
    
    /** \ingroup IterativeLinearSolvers_Module
     * \brief A minimal residual solver for sparse symmetric problems
     *
     * This class allows solving sparse linear problems A.x = b using the MINRES algorithm
     * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite).
     * The vectors x and b can be either dense or sparse.
     *
     * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
     * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
     *               or Upper. Default is Lower.
     * \tparam _Preconditioner the type of the preconditioner. Default is IdentityPreconditioner
     *
     * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
     * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
     * and NumTraits<Scalar>::epsilon() for the tolerance.
     *
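     * For instance (a minimal sketch, assuming \c mr is a MINRES solver declared as in the
     * example below):
     * \code
     * mr.setMaxIterations(500);
     * mr.setTolerance(1e-8);
     * \endcode
     *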
     * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
     * \code
     * int n = 10000;
     * VectorXd x(n), b(n);
     * SparseMatrix<double> A(n,n);
     * // fill A and b
     * MINRES<SparseMatrix<double> > mr;
     * mr.compute(A);
     * x = mr.solve(b);
     * std::cout << "#iterations: " << mr.iterations() << std::endl;
     * std::cout << "estimated error: " << mr.error() << std::endl;
     * // update b, and solve again
     * x = mr.solve(b);
     * \endcode
     *
     * By default the iterations start with x=0 as an initial guess of the solution.
     * One can control the start using the solveWithGuess() method. Here is a step by
     * step execution example starting with a random guess and printing the evolution
     * of the estimated error:
     * \code
     * x = VectorXd::Random(n);
     * mr.setMaxIterations(1);
     * int i = 0;
     * do {
     *   x = mr.solveWithGuess(b,x);
     *   std::cout << i << " : " << mr.error() << std::endl;
     *   ++i;
     * } while (mr.info()!=Success && i<100);
     * \endcode
     * Note that such a step by step execution is slightly slower.
     *
     * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
     */
    template< typename _MatrixType, int _UpLo, typename _Preconditioner>
    class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
    {
        
        typedef IterativeSolverBase<MINRES> Base;
        using Base::mp_matrix;
        using Base::m_error;
        using Base::m_iterations;
        using Base::m_info;
        using Base::m_isInitialized;
    public:
        typedef _MatrixType MatrixType;
        typedef typename MatrixType::Scalar Scalar;
        typedef typename MatrixType::Index Index;
        typedef typename MatrixType::RealScalar RealScalar;
        typedef _Preconditioner Preconditioner;
        
        enum {UpLo = _UpLo};
        
    public:
        
        /** Default constructor. */
        MINRES() : Base() {}
        
        /** Initialize the solver with matrix \a A for further \c Ax=b solving.
         *
         * This constructor is a shortcut for the default constructor followed
         * by a call to compute().
         *
         * \warning this class stores a reference to the matrix A as well as some
         * precomputed values that depend on it. Therefore, if \a A is changed
         * this class becomes invalid. Call compute() to update it with the new
         * matrix A, or modify a copy of A.
         */
        MINRES(const MatrixType& A) : Base(A) {}
        
        /** Destructor. */
        ~MINRES(){}
        
        /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
         * and \a x0 as an initial solution.
         *
         * \sa compute()
         */
        template<typename Rhs,typename Guess>
        inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
        solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
        {
            eigen_assert(m_isInitialized && "MINRES is not initialized.");
            eigen_assert(Base::rows()==b.rows()
                         && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
            return internal::solve_retval_with_guess
                   <MINRES, Rhs, Guess>(*this, b.derived(), x0);
        }
        
        /** \internal */
        template<typename Rhs,typename Dest>
        void _solveWithGuess(const Rhs& b, Dest& x) const
        {
            m_iterations = Base::maxIterations();
            m_error = Base::m_tolerance;
            
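            // Solve each column of b as an independent right-hand side; the iteration count
            // and error estimate kept in the base class are those of the last column solved.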
            for(int j=0; j<b.cols(); ++j)
            {
                m_iterations = Base::maxIterations();
                m_error = Base::m_tolerance;
                
                typename Dest::ColXpr xj(x,j);
                internal::minres(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
                                 Base::m_preconditioner, m_iterations, m_error);
            }
            
            m_isInitialized = true;
            m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
        }
        
        /** \internal */
        template<typename Rhs,typename Dest>
        void _solve(const Rhs& b, Dest& x) const
        {
            x.setZero();
            _solveWithGuess(b,x);
        }
        
    protected:
        
    };
    
    namespace internal {
        
        template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
        struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
        : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
        {
            typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
            EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
            
            template<typename Dest> void evalTo(Dest& dst) const
            {
                dec()._solve(rhs(),dst);
            }
        };
        
    } // end namespace internal
    
} // end namespace Eigen

#endif // EIGEN_MINRES_H_