Re-bundle new Ceres library
- Fixes some harmless issues (in parts of Ceres that Blender doesn't use yet)
- Fixes some compilation warnings
extern/libmv/third_party/ceres/ChangeLog (vendored) | 261
@@ -1,3 +1,136 @@
commit 80a53eebfd28bfc032cedbf7852d5c56eb1d5af5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Thu Jan 9 12:40:54 2014 -0800

    Faster LBFGS.

    1. Use column major storage for the various matrices used by
       LowRankInverseHessian, since all the operations are on columns.

    2. Use a circular buffer to keep track of the history of the LBFGS
       updates, so that an update does not require copying the entire
       history. This makes the updates O(1) rather than O(rank).

    The implementation has been checked against the denoising code,
    where it gives numerically identical results. The overhead of the
    LBFGS code is now near negligible compared to the gradient
    evaluation.

    On a sample problem: before 1050 ms, after 630 ms.

    Change-Id: I537ba506ac35fc4960b304c10d923a8dea2ae031
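A minimal sketch of the circular-buffer bookkeeping described above (hypothetical and simplified; the actual std::list<int> scheme appears in the low_rank_inverse_hessian.cc hunk further down this diff):

    #include <list>

    // Sketch: track the order of LBFGS corrections as a list of column
    // indices. Once the history is full, recycle the oldest column rather
    // than shifting every column, so an update is O(1) instead of O(rank).
    class CorrectionIndices {
     public:
      explicit CorrectionIndices(int max_corrections)
          : max_corrections_(max_corrections) {}

      // Returns the column to overwrite with the newest correction.
      int Next() {
        int next = static_cast<int>(indices_.size());
        if (next == max_corrections_) {
          next = indices_.front();  // Recycle the oldest column.
          indices_.pop_front();
        }
        indices_.push_back(next);
        return next;
      }

     private:
      const int max_corrections_;
      std::list<int> indices_;  // Columns ordered oldest to newest.
    };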
commit f55780063620e7a3dcfe7e018d6488bf6a5b29da
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Wed Jan 8 10:43:31 2014 -0800

    Reduce logging verbosity.

    When the user specifies Solver::Options::logging_type = SILENT,
    ensure that the minimizer does not log anything.

    Change-Id: I94e34dae504881ab36d4a66e6adb7a19a227363e
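For reference, a minimal user-side sketch of the option named above:

    #include "ceres/ceres.h"

    // With logging_type == SILENT the minimizer prints nothing;
    // progress is only available via the returned Summary.
    void SolveQuietly(ceres::Problem* problem) {
      ceres::Solver::Options options;
      options.logging_type = ceres::SILENT;
      ceres::Solver::Summary summary;
      ceres::Solve(options, problem, &summary);
    }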
commit 85561eee951c91e578984c6d3eecf0073acabb64
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Tue Jan 7 22:22:14 2014 -0800

    Use int32 for parameter block sizes.

    CostFunction now uses int32 instead of int16
    to store the size of its parameter blocks.

    This is an API breaking change.

    Change-Id: I032ea583bc7ea4b3009be25d23a3be143749c73e
commit a7fda3317b1a97702750bea96ac3ef3d1a2afb49
Author: Alex Stewart <alexs.mac@gmail.com>
Date:   Mon Jan 6 10:25:42 2014 +0000

    Fix typos in error messages in line search config checks.

    Change-Id: I3ae2ae58328e996598e3e32c12869d2b10109ef7
commit f695322eb8c5ff118f0d27f68d46d557338e5db1
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Sat Jan 4 14:28:23 2014 -0800

    Remove a compilation warning on Windows.

    Only define NOMINMAX if it is not already defined.

    Thanks to Pierre Moulon for this fix.

    Change-Id: Ia5dc0f5ff2afe10e4c7e97a57f54297d82052b21
commit b811041d78d80518db153ef3030bcbdbaf80df8d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date:   Thu Jan 2 15:19:17 2014 +0600

    Code cleanup: fix no previous declaration warnings

    Moved some internally used functions into an anonymous namespace.

    Change-Id: Ie82df61b0608abac79ccc9f7b14e7f7e04ab733d
commit f14f6bf9b7d3fbd2cab939cf4ad615b317e93c83
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Thu Dec 26 09:50:45 2013 -0800

    Speed up SPARSE_NORMAL_CHOLESKY when using CX_SPARSE.

    When using sparse Cholesky factorization to solve the linear
    least squares problem:

        Ax = b

    there are two sources of computational complexity:

    1. Computing H = A'A
    2. Computing the sparse Cholesky factorization of H.

    Doing 1. using CX_SPARSE is particularly expensive, as it uses
    a generic cs_multiply function which computes the structure of
    the matrix H every time, reallocates memory, and does not take
    advantage of the fact that the matrix being computed is a symmetric
    outer product.

    This change adds a custom symmetric outer product algorithm for
    CompressedRowSparseMatrix.

    It has a symbolic phase, where it computes the sparsity structure
    of the output matrix and a "program" which allows the actual
    multiplication routine to determine exactly which entry in the
    values array each term in the product contributes to.

    With these two bits of information, the outer product H = A'A
    can be computed extremely fast without any reasoning about
    the structure of H.

    Further gains in efficiency are made by exploiting the block
    structure of A.

    With this change, SPARSE_NORMAL_CHOLESKY with CX_SPARSE as the
    backend results in a > 300% speedup for some problems.

    The symbolic analysis phase of the solver is a bit more expensive
    now, but the increased cost is made up in 3-4 iterations.

    Change-Id: I5e4a72b4d03ba41b378a2634330bc22b299c0f12
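A minimal sketch of the "program" idea (hypothetical and simplified; the real two-phase implementation appears in the compressed_row_sparse_matrix.cc hunk below). Once the symbolic phase has assigned every product term a slot in the values array, the numeric phase is a single indexed accumulation:

    #include <vector>

    // program[t] is the slot in the output values array that product
    // term t accumulates into, as computed once by the symbolic phase.
    void ApplyProgram(const std::vector<double>& term_products,
                      const std::vector<int>& program,
                      std::vector<double>* values) {
      for (size_t t = 0; t < term_products.size(); ++t) {
        (*values)[program[t]] += term_products[t];
      }
    }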
commit d79f886eb87cb064e19eb12c1ad3d45bbed92198
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Mon Dec 30 07:39:10 2013 -0800

    Refactor line search error checking code.

    Move the error checking code into its own function
    so that it can be used in upcoming changes.

    Change-Id: Icf348e5a8bbe8f8b663f04fb8cfc9a2149b12f22
commit 2b16b0080b6e673eaaf9ed478c9e971d9fcd65de
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Fri Dec 20 15:22:26 2013 -0800
@@ -553,131 +686,3 @@ Date:   Tue Nov 5 13:10:27 2013 +0000

    information as to which sub-modules are missing.

    Change-Id: I6eed94af49263540b8f87917b75c41b8f49658a0

commit 9ba0b352a282f08b1b6368a5690434407d7c81af
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Tue Nov 5 13:04:56 2013 -0800

    Lint and other cleanups from William Rucklidge

    Change-Id: I7fb23c2db85f0f121204560b79f1966f3d584431

commit 69bd65ff4368ce2841519f00ff48c5284c1743a3
Author: Alex Stewart <alexs.mac@gmail.com>
Date:   Mon Nov 4 23:01:14 2013 +0000

    Downgrading warning messages when optional deps are not found.

    - Now when find_package() is called for a dependency without the
      REQUIRED or QUIET qualifiers, we emit no priority (above STATUS, but
      below WARNING) messages and continue.

    Change-Id: I8cdeda7a8f6c91d45fb7f24fb366244c6c9b66e1

commit b0a8731fcdde31e6c37a54e8c1e1c00f853c0d5c
Author: Alex Stewart <alexs.mac@gmail.com>
Date:   Mon Nov 4 20:32:40 2013 +0000

    Removing duplicate SuiteSparse found message.

    - Also flipping the ordering of variables in
      find_package_handle_standard_args() so that the automatically
      generated message prints the include directories, not TRUE.

    Change-Id: I2bf62eacd5c96f27152e9542b9a74651243a584e

commit 6fed9fe0de9d1737095c24e19ad8df9735b7e572
Author: Alex Stewart <alexs.mac@gmail.com>
Date:   Mon Nov 4 18:33:05 2013 +0000

    Fix FindPackage scripts to emit warnings, not errors if not found.

    - Previously we used message priority SEND_ERROR when a package was
      not found and find_package() was called without QUIET or REQUIRED,
      which emits an error message and prevents generation, but continues
      configuration.
    - The fact that SEND_ERROR induces an error message was confusing for
      users, as it implies that something bad has happened and they cannot
      continue, when in fact we were disabling the option in question and
      were thus able to continue; all they had to do was re-configure.

    - This commit also reorders the search lists for includes/libraries
      so that we always search user-installed locations (e.g. /usr/local)
      before system-installed locations. Thus we will now always prefer
      a user install to a system install if both are available, which is
      likely to be the user's desired intention.

    Change-Id: Ide84919f27d3373f31282f70c685720cd77a6723

commit cada337149cbc4b9e6f2bae14593b87ecf8f1a5c
Author: Alex Stewart <alexs.mac@gmail.com>
Date:   Mon Nov 4 18:08:24 2013 +0000

    Fixing CXSparse include directories statement.

    - Reported as issue #135:
      https://code.google.com/p/ceres-solver/issues/detail?id=135.
    - CXSPARSE_INCLUDE was the legacy include directory variable; since
      the buildsystem updates we now use the CMake standard:
      CXSPARSE_INCLUDE_DIRS.

    Change-Id: Iab0c2de14d524bb9e9da230bc574b5e6f09e1f31

commit c71085ed326239dc2d318d848ded9a99e4e3c107
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Thu Oct 31 13:56:38 2013 -0700

    Update to 1.8.0rc1.

    Change-Id: Iaa10fd5a20be2ef84aca0119306c44669d87cc5d

commit 88a703f44ff0d6d5d4601584fa77f5ce853025f4
Author: Petter Strandmark <petter.strandmark@gmail.com>
Date:   Thu Oct 31 21:13:48 2013 +0100

    Fix compilation in Visual C++ 2013.

    I had to fix the following things to make Ceres compile in VC++ 2013:
    * Not link to 'm' (GNU math library).
    * Explicitly convert an std::ostream to bool.
    * Include <algorithm> for std::max.

    Change-Id: I3ff65413baf8711364360d46dd71fd553fa63e72
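As an illustration of the second item (a sketch, not the actual Ceres change): Visual C++ 2013 exposes only the C++11 explicit operator bool on streams, so a stream must be converted explicitly rather than implicitly:

    #include <fstream>

    int main() {
      std::ofstream out("log.txt");
      // `bool ok = out;` no longer compiles; convert explicitly.
      const bool ok = static_cast<bool>(out);
      return ok ? 0 : 1;
    }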
commit f06b9face5bfbbc2b338aa2460bee2298a3865c5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date:   Sun Oct 27 21:38:13 2013 -0700

    Add support for multiple visibility clustering algorithms.

    The original visibility based preconditioning paper and
    implementation only used the canonical views algorithm.

    For large dense graphs this algorithm can be particularly
    expensive, as its worst case complexity is cubic in the size
    of the graph.

    Further, for many uses the SCHUR_JACOBI preconditioner
    was both effective enough and cheap. It does, however,
    suffer from a fatal flaw: if the parameters of a camera
    are split between two or more parameter blocks, e.g.,
    extrinsics and intrinsics, the preconditioner, because
    it is block diagonal, will not capture the interactions
    between them.

    Using CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL will fix
    this problem, but as mentioned above this can be quite
    expensive depending on the problem.

    This change extends the visibility based preconditioner
    to allow for multiple clustering algorithms, and adds
    a simple thresholded single linkage clustering algorithm
    which allows you to construct versions of the CLUSTER_JACOBI
    and CLUSTER_TRIDIAGONAL preconditioners that are cheap
    to construct and are more effective than SCHUR_JACOBI.

    Currently the constants controlling the threshold above
    which edges are considered in the single linkage algorithm
    are not exposed; this will be done in a future change.

    Change-Id: I7ddc36790943f24b19c7f08b10694ae9a822f5c9
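A hedged configuration sketch for the above (the option name visibility_clustering_type is an assumption based on how later Ceres releases expose this choice; it does not appear in this diff):

    #include "ceres/ceres.h"

    // Pick a cluster-based preconditioner and the cheaper single linkage
    // clustering added by this commit (option name assumed, see above).
    void ConfigureBundleAdjustment(ceres::Solver::Options* options) {
      options->linear_solver_type = ceres::ITERATIVE_SCHUR;
      options->preconditioner_type = ceres::CLUSTER_TRIDIAGONAL;
      options->visibility_clustering_type = ceres::SINGLE_LINKAGE;
    }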
@@ -115,7 +115,7 @@ class CostFunction {
                        double* residuals,
                        double** jacobians) const = 0;

-  const vector<int16>& parameter_block_sizes() const {
+  const vector<int32>& parameter_block_sizes() const {
    return parameter_block_sizes_;
  }

@@ -124,7 +124,7 @@ class CostFunction {
  }

 protected:
-  vector<int16>* mutable_parameter_block_sizes() {
+  vector<int32>* mutable_parameter_block_sizes() {
    return &parameter_block_sizes_;
  }

@@ -135,7 +135,7 @@ class CostFunction {
 private:
  // Cost function signature metadata: number of inputs & their sizes,
  // number of outputs (residuals).
-  vector<int16> parameter_block_sizes_;
+  vector<int32> parameter_block_sizes_;
  int num_residuals_;
  CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
@@ -127,7 +127,7 @@ class CostFunctionToFunctor {
        << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
        << N8 << ", " << N9;

-    const vector<int16>& parameter_block_sizes =
+    const vector<int32>& parameter_block_sizes =
        cost_function->parameter_block_sizes();
    const int num_parameter_blocks =
        (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +

@@ -679,7 +679,7 @@ class CostFunctionToFunctor {
  template <typename JetT>
  bool EvaluateWithJets(const JetT** inputs, JetT* output) const {
    const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
-    const vector<int16>& parameter_block_sizes =
+    const vector<int32>& parameter_block_sizes =
        cost_function_->parameter_block_sizes();
    const int num_parameter_blocks = parameter_block_sizes.size();
    const int num_residuals = cost_function_->num_residuals();

@@ -732,7 +732,7 @@ class CostFunctionToFunctor {
      output[i].v.setZero();

      for (int j = 0; j < num_parameter_blocks; ++j) {
-        const int16 block_size = parameter_block_sizes[j];
+        const int32 block_size = parameter_block_sizes[j];
        for (int k = 0; k < parameter_block_sizes[j]; ++k) {
          output[i].v +=
              jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
@@ -104,7 +104,7 @@ class DynamicNumericDiffCostFunction : public CostFunction {
        << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
        << "before DynamicNumericDiffCostFunction::Evaluate().";

-    const vector<int16>& block_sizes = parameter_block_sizes();
+    const vector<int32>& block_sizes = parameter_block_sizes();
    CHECK(!block_sizes.empty())
        << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
        << "before DynamicNumericDiffCostFunction::Evaluate().";
@@ -119,7 +119,7 @@ class GradientChecker {
    // Do a consistency check between the term and the template parameters.
    CHECK_EQ(M, term->num_residuals());
    const int num_residuals = M;
-    const vector<int16>& block_sizes = term->parameter_block_sizes();
+    const vector<int32>& block_sizes = term->parameter_block_sizes();
    const int num_blocks = block_sizes.size();

    CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
@@ -47,7 +47,7 @@ namespace internal {

class BlockStructureProto;

-typedef int16 BlockSize;
+typedef int32 BlockSize;

struct Block {
  Block() : size(-1), position(-1) {}
@@ -125,7 +125,7 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(

  // Find the cumulative sum of the row counts.
  for (int i = 1; i < num_rows_ + 1; ++i) {
-    rows_[i] += rows_[i-1];
+    rows_[i] += rows_[i - 1];
  }

  CHECK_EQ(num_nonzeros(), m.num_nonzeros());

@@ -217,8 +217,8 @@ void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
  num_rows_ -= delta_rows;
  rows_.resize(num_rows_ + 1);

-  // Walk the list of row blocks untill we reach the new number of
-  // rows and then drop the rest of the row blocks.
+  // Walk the list of row blocks until we reach the new number of rows
+  // and the drop the rest of the row blocks.
  int num_row_blocks = 0;
  int num_rows = 0;
  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {

@@ -380,6 +380,155 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
  return transpose;
}

namespace {
// A ProductTerm is a term in the outer product of a matrix with
// itself.
struct ProductTerm {
  ProductTerm(const int row, const int col, const int index)
      : row(row), col(col), index(index) {
  }

  bool operator<(const ProductTerm& right) const {
    if (row == right.row) {
      if (col == right.col) {
        return index < right.index;
      }
      return col < right.col;
    }
    return row < right.row;
  }

  int row;
  int col;
  int index;
};

CompressedRowSparseMatrix*
CompressAndFillProgram(const int num_rows,
                       const int num_cols,
                       const vector<ProductTerm>& product,
                       vector<int>* program) {
  CHECK_GT(product.size(), 0);

  // Count the number of unique product terms, which in turn is the
  // number of non-zeros in the outer product.
  int num_nonzeros = 1;
  for (int i = 1; i < product.size(); ++i) {
    if (product[i].row != product[i - 1].row ||
        product[i].col != product[i - 1].col) {
      ++num_nonzeros;
    }
  }

  CompressedRowSparseMatrix* matrix =
      new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);

  int* crsm_rows = matrix->mutable_rows();
  std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
  int* crsm_cols = matrix->mutable_cols();
  std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);

  CHECK_NOTNULL(program)->clear();
  program->resize(product.size());

  // Iterate over the sorted product terms. This means each row is
  // filled one at a time, and we are able to assign a position in the
  // values array to each term.
  //
  // If terms repeat, i.e., they contribute to the same entry in the
  // result matrix, then they do not affect the sparsity structure of
  // the result matrix.
  int nnz = 0;
  crsm_cols[0] = product[0].col;
  crsm_rows[product[0].row + 1]++;
  (*program)[product[0].index] = nnz;
  for (int i = 1; i < product.size(); ++i) {
    const ProductTerm& previous = product[i - 1];
    const ProductTerm& current = product[i];

    // Sparsity structure is updated only if the term is not a repeat.
    if (previous.row != current.row || previous.col != current.col) {
      crsm_cols[++nnz] = current.col;
      crsm_rows[current.row + 1]++;
    }

    // All terms get assigned the position in the values array where
    // their value is accumulated.
    (*program)[current.index] = nnz;
  }

  for (int i = 1; i < num_rows + 1; ++i) {
    crsm_rows[i] += crsm_rows[i - 1];
  }

  return matrix;
}

}  // namespace

CompressedRowSparseMatrix*
CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
    const CompressedRowSparseMatrix& m,
    vector<int>* program) {
  CHECK_NOTNULL(program)->clear();
  CHECK_GT(m.num_nonzeros(), 0) << "Congratulations, "
                                << "you found a bug in Ceres. Please report it.";

  vector<ProductTerm> product;
  const vector<int>& row_blocks = m.row_blocks();
  int row_block_begin = 0;
  // Iterate over row blocks.
  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
    const int row_block_end = row_block_begin + row_blocks[row_block];
    // Compute the outer product terms for just one row per row block.
    const int r = row_block_begin;
    // Compute the lower triangular part of the product.
    for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
      for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
        product.push_back(
            ProductTerm(m.cols()[idx1], m.cols()[idx2], product.size()));
      }
    }
    row_block_begin = row_block_end;
  }
  CHECK_EQ(row_block_begin, m.num_rows());
  sort(product.begin(), product.end());
  return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
}

void CompressedRowSparseMatrix::ComputeOuterProduct(
    const CompressedRowSparseMatrix& m,
    const vector<int>& program,
    CompressedRowSparseMatrix* result) {
  result->SetZero();
  double* values = result->mutable_values();
  const vector<int>& row_blocks = m.row_blocks();

  int cursor = 0;
  int row_block_begin = 0;
  const double* m_values = m.values();
  const int* m_rows = m.rows();
  // Iterate over row blocks.
  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
    const int row_block_end = row_block_begin + row_blocks[row_block];
    const int saved_cursor = cursor;
    for (int r = row_block_begin; r < row_block_end; ++r) {
      // Reuse the program segment for each row in this row block.
      cursor = saved_cursor;
      const int row_begin = m_rows[r];
      const int row_end = m_rows[r + 1];
      for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
        const double v1 = m_values[idx1];
        for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
          values[program[cursor]] += v1 * m_values[idx2];
        }
      }
    }
    row_block_begin = row_block_end;
  }

  CHECK_EQ(row_block_begin, m.num_rows());
  CHECK_EQ(cursor, program.size());
}

}  // namespace internal
}  // namespace ceres
@@ -128,6 +128,32 @@ class CompressedRowSparseMatrix : public SparseMatrix {
      const double* diagonal,
      const vector<int>& blocks);

  // Compute the sparsity structure of the product m.transpose() * m
  // and create a CompressedRowSparseMatrix corresponding to it.
  //
  // Also compute a "program" vector, which for every term in the
  // outer product points to the entry in the values array of the
  // result matrix where it should be accumulated.
  //
  // This program is used by the ComputeOuterProduct function below to
  // compute the outer product.
  //
  // Since the entries of the program are the same for rows with the
  // same sparsity structure, the program only stores the result for
  // one row per row block. The ComputeOuterProduct function reuses
  // this information for each row in the row block.
  static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
      const CompressedRowSparseMatrix& m,
      vector<int>* program);

  // Compute the values array for the expression m.transpose() * m,
  // where the matrix used to store the result and a program have been
  // created using the CreateOuterProductMatrixAndProgram function
  // above.
  static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
                                  const vector<int>& program,
                                  CompressedRowSparseMatrix* result);

 private:
  int num_rows_;
  int num_cols_;
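A hedged usage sketch of the two declarations above, mirroring how SparseNormalCholeskySolver uses them later in this diff (here A is an existing CompressedRowSparseMatrix; the symbolic phase runs once, the numeric phase on every new set of values):

    // Symbolic phase: build the structure of A'A and the program.
    vector<int> pattern;
    scoped_ptr<CompressedRowSparseMatrix> outer_product(
        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
            A, &pattern));

    // Numeric phase: refill outer_product's values from A's current values.
    CompressedRowSparseMatrix::ComputeOuterProduct(A, pattern,
                                                   outer_product.get());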
@@ -93,7 +93,7 @@ class GradientCheckingCostFunction : public CostFunction {
        DO_NOT_TAKE_OWNERSHIP,
        relative_step_size);

-    const vector<int16>& parameter_block_sizes =
+    const vector<int32>& parameter_block_sizes =
        function->parameter_block_sizes();
    for (int i = 0; i < parameter_block_sizes.size(); ++i) {
      finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);

@@ -117,7 +117,7 @@ class GradientCheckingCostFunction : public CostFunction {
    int num_residuals = function_->num_residuals();

    // Make space for the jacobians of the two methods.
-    const vector<int16>& block_sizes = function_->parameter_block_sizes();
+    const vector<int32>& block_sizes = function_->parameter_block_sizes();
    vector<Matrix> term_jacobians(block_sizes.size());
    vector<Matrix> finite_difference_jacobians(block_sizes.size());
    vector<double*> term_jacobian_pointers(block_sizes.size());
@@ -28,6 +28,8 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

+#include <list>
+
#include "ceres/internal/eigen.h"
#include "ceres/low_rank_inverse_hessian.h"
#include "glog/logging.h"

@@ -77,7 +79,6 @@ LowRankInverseHessian::LowRankInverseHessian(
    : num_parameters_(num_parameters),
      max_num_corrections_(max_num_corrections),
      use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
-      num_corrections_(0),
      approximate_eigenvalue_scale_(1.0),
      delta_x_history_(num_parameters, max_num_corrections),
      delta_gradient_history_(num_parameters, max_num_corrections),

@@ -96,29 +97,20 @@ bool LowRankInverseHessian::Update(const Vector& delta_x,
    return false;
  }

-  if (num_corrections_ == max_num_corrections_) {
-    // TODO(sameeragarwal): This can be done more efficiently using
-    // a circular buffer/indexing scheme, but for simplicity we will
-    // do the expensive copy for now.
-    delta_x_history_.block(0, 0, num_parameters_, max_num_corrections_ - 1) =
-        delta_x_history_
-        .block(0, 1, num_parameters_, max_num_corrections_ - 1);
-
-    delta_gradient_history_
-        .block(0, 0, num_parameters_, max_num_corrections_ - 1) =
-        delta_gradient_history_
-        .block(0, 1, num_parameters_, max_num_corrections_ - 1);
-
-    delta_x_dot_delta_gradient_.head(num_corrections_ - 1) =
-        delta_x_dot_delta_gradient_.tail(num_corrections_ - 1);
-  } else {
-    ++num_corrections_;
+  int next = indices_.size();
+  // Once the size of the list reaches max_num_corrections_, simulate
+  // a circular buffer by removing the first element of the list and
+  // making it the next position where the LBFGS history is stored.
+  if (next == max_num_corrections_) {
+    next = indices_.front();
+    indices_.pop_front();
  }

-  delta_x_history_.col(num_corrections_ - 1) = delta_x;
-  delta_gradient_history_.col(num_corrections_ - 1) = delta_gradient;
-  delta_x_dot_delta_gradient_(num_corrections_ - 1) =
-      delta_x_dot_delta_gradient;
+  indices_.push_back(next);
+  delta_x_history_.col(next) = delta_x;
+  delta_gradient_history_.col(next) = delta_gradient;
+  delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
  approximate_eigenvalue_scale_ =
      delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
  return true;

@@ -131,12 +123,16 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,

  search_direction = gradient;

-  Vector alpha(num_corrections_);
+  const int num_corrections = indices_.size();
+  Vector alpha(num_corrections);

-  for (int i = num_corrections_ - 1; i >= 0; --i) {
-    alpha(i) = delta_x_history_.col(i).dot(search_direction) /
-        delta_x_dot_delta_gradient_(i);
-    search_direction -= alpha(i) * delta_gradient_history_.col(i);
+  for (std::list<int>::const_reverse_iterator it = indices_.rbegin();
+       it != indices_.rend();
+       ++it) {
+    const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
+        delta_x_dot_delta_gradient_(*it);
+    search_direction -= alpha_i * delta_gradient_history_.col(*it);
+    alpha(*it) = alpha_i;
  }

  if (use_approximate_eigenvalue_scaling_) {

@@ -177,10 +173,12 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
        << "approximation.";
  }

-  for (int i = 0; i < num_corrections_; ++i) {
-    const double beta = delta_gradient_history_.col(i).dot(search_direction) /
-        delta_x_dot_delta_gradient_(i);
-    search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
+  for (std::list<int>::const_iterator it = indices_.begin();
+       it != indices_.end();
+       ++it) {
+    const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
+        delta_x_dot_delta_gradient_(*it);
+    search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
  }
}
@@ -34,6 +34,8 @@
#ifndef CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
#define CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_

+#include <list>
+
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"

@@ -93,11 +95,11 @@ class LowRankInverseHessian : public LinearOperator {
  const int num_parameters_;
  const int max_num_corrections_;
  const bool use_approximate_eigenvalue_scaling_;
-  int num_corrections_;
  double approximate_eigenvalue_scale_;
-  Matrix delta_x_history_;
-  Matrix delta_gradient_history_;
+  ColMajorMatrix delta_x_history_;
+  ColMajorMatrix delta_gradient_history_;
  Vector delta_x_dot_delta_gradient_;
+  std::list<int> indices_;
};

}  // namespace internal
@@ -107,7 +107,7 @@ class Minimizer {
          options.line_search_sufficient_curvature_decrease;
      max_line_search_step_expansion =
          options.max_line_search_step_expansion;
-      is_silent = false;
+      is_silent = (options.logging_type == SILENT);
      evaluator = NULL;
      trust_region_strategy = NULL;
      jacobian = NULL;
@@ -112,7 +112,9 @@
// To avoid macro definition of ERROR.
#  define NOGDI
// To avoid macro definition of min/max.
-#  define NOMINMAX
+#  ifndef NOMINMAX
+#    define NOMINMAX
+#  endif
#  include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
@@ -120,7 +120,6 @@ Vector RemoveLeadingZeros(const Vector& polynomial_in) {
  }
  return polynomial_in.tail(polynomial_in.size() - i);
}
-}  // namespace

void FindLinearPolynomialRoots(const Vector& polynomial,
                               Vector* real,

@@ -178,6 +177,7 @@ void FindQuadraticPolynomialRoots(const Vector& polynomial,
    (*imaginary)(1) = -sqrt_D / (2.0 * a);
  }
}
+}  // namespace

bool FindPolynomialRoots(const Vector& polynomial_in,
                         Vector* real,
@@ -224,7 +224,7 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
           cost_function->parameter_block_sizes().size());

  // Check the sizes match.
-  const vector<int16>& parameter_block_sizes =
+  const vector<int32>& parameter_block_sizes =
      cost_function->parameter_block_sizes();

  if (!options_.disable_all_safety_checks) {
@@ -484,7 +484,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
        "Terminating: Function tolerance reached. "
        "No non-constant parameter blocks found.";
    summary->termination_type = CONVERGENCE;
-    VLOG(1) << summary->message;
+    VLOG_IF(1, options.logging_type != SILENT) << summary->message;

    summary->initial_cost = summary->fixed_cost;
    summary->final_cost = summary->fixed_cost;

@@ -620,6 +620,83 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,


#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
bool LineSearchOptionsAreValid(const Solver::Options& options,
                               string* message) {
  // Validate values for configuration parameters supplied by user.
  if ((options.line_search_direction_type == ceres::BFGS ||
       options.line_search_direction_type == ceres::LBFGS) &&
      options.line_search_type != ceres::WOLFE) {
    *message =
        string("Invalid configuration: require line_search_type == "
               "ceres::WOLFE when using (L)BFGS to ensure that underlying "
               "assumptions are guaranteed to be satisfied.");
    return false;
  }
  if (options.max_lbfgs_rank <= 0) {
    *message =
        string("Invalid configuration: require max_lbfgs_rank > 0");
    return false;
  }
  if (options.min_line_search_step_size <= 0.0) {
    *message =
        "Invalid configuration: require min_line_search_step_size > 0.0.";
    return false;
  }
  if (options.line_search_sufficient_function_decrease <= 0.0) {
    *message =
        string("Invalid configuration: require ") +
        string("line_search_sufficient_function_decrease > 0.0.");
    return false;
  }
  if (options.max_line_search_step_contraction <= 0.0 ||
      options.max_line_search_step_contraction >= 1.0) {
    *message = string("Invalid configuration: require ") +
        string("0.0 < max_line_search_step_contraction < 1.0.");
    return false;
  }
  if (options.min_line_search_step_contraction <=
      options.max_line_search_step_contraction ||
      options.min_line_search_step_contraction > 1.0) {
    *message = string("Invalid configuration: require ") +
        string("max_line_search_step_contraction < ") +
        string("min_line_search_step_contraction <= 1.0.");
    return false;
  }
  // Warn user if they have requested BISECTION interpolation, but constraints
  // on max/min step size change during line search prevent bisection scaling
  // from occurring. Warn only, as this is likely a user mistake, but one which
  // does not prevent us from continuing.
  LOG_IF(WARNING,
         (options.line_search_interpolation_type == ceres::BISECTION &&
          (options.max_line_search_step_contraction > 0.5 ||
           options.min_line_search_step_contraction < 0.5)))
      << "Line search interpolation type is BISECTION, but specified "
      << "max_line_search_step_contraction: "
      << options.max_line_search_step_contraction << ", and "
      << "min_line_search_step_contraction: "
      << options.min_line_search_step_contraction
      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
  if (options.max_num_line_search_step_size_iterations <= 0) {
    *message = string("Invalid configuration: require ") +
        string("max_num_line_search_step_size_iterations > 0.");
    return false;
  }
  if (options.line_search_sufficient_curvature_decrease <=
      options.line_search_sufficient_function_decrease ||
      options.line_search_sufficient_curvature_decrease > 1.0) {
    *message = string("Invalid configuration: require ") +
        string("line_search_sufficient_function_decrease < ") +
        string("line_search_sufficient_curvature_decrease < 1.0.");
    return false;
  }
  if (options.max_line_search_step_expansion <= 1.0) {
    *message = string("Invalid configuration: require ") +
        string("max_line_search_step_expansion > 1.0.");
    return false;
  }
  return true;
}

void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
                                 ProblemImpl* original_problem_impl,
                                 Solver::Summary* summary) {

@@ -631,9 +708,8 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
  // Reset the summary object to its default values.
  *CHECK_NOTNULL(summary) = Solver::Summary();

-  summary->minimizer_type = LINE_SEARCH;
  SummarizeGivenProgram(*original_program, summary);

+  summary->minimizer_type = LINE_SEARCH;
  summary->line_search_direction_type =
      original_options.line_search_direction_type;
  summary->max_lbfgs_rank = original_options.max_lbfgs_rank;

@@ -643,84 +719,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
  summary->nonlinear_conjugate_gradient_type =
      original_options.nonlinear_conjugate_gradient_type;

-  // Validate values for configuration parameters supplied by user.
-  if ((original_options.line_search_direction_type == ceres::BFGS ||
-       original_options.line_search_direction_type == ceres::LBFGS) &&
-      original_options.line_search_type != ceres::WOLFE) {
-    summary->message =
-        string("Invalid configuration: require line_search_type == "
-               "ceres::WOLFE when using (L)BFGS to ensure that underlying "
-               "assumptions are guaranteed to be satisfied.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.max_lbfgs_rank <= 0) {
-    summary->message =
-        string("Invalid configuration: require max_lbfgs_rank > 0");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.min_line_search_step_size <= 0.0) {
-    summary->message =
-        "Invalid configuration: min_line_search_step_size <= 0.0.";
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.line_search_sufficient_function_decrease <= 0.0) {
-    summary->message =
-        string("Invalid configuration: require ") +
-        string("line_search_sufficient_function_decrease <= 0.0.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.max_line_search_step_contraction <= 0.0 ||
-      original_options.max_line_search_step_contraction >= 1.0) {
-    summary->message = string("Invalid configuration: require ") +
-        string("0.0 < max_line_search_step_contraction < 1.0.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.min_line_search_step_contraction <=
-      original_options.max_line_search_step_contraction ||
-      original_options.min_line_search_step_contraction > 1.0) {
-    summary->message = string("Invalid configuration: require ") +
-        string("max_line_search_step_contraction < ") +
-        string("min_line_search_step_contraction <= 1.0.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  // Warn user if they have requested BISECTION interpolation, but constraints
-  // on max/min step size change during line search prevent bisection scaling
-  // from occurring. Warn only, as this is likely a user mistake, but one which
-  // does not prevent us from continuing.
-  LOG_IF(WARNING,
-         (original_options.line_search_interpolation_type == ceres::BISECTION &&
-          (original_options.max_line_search_step_contraction > 0.5 ||
-           original_options.min_line_search_step_contraction < 0.5)))
-      << "Line search interpolation type is BISECTION, but specified "
-      << "max_line_search_step_contraction: "
-      << original_options.max_line_search_step_contraction << ", and "
-      << "min_line_search_step_contraction: "
-      << original_options.min_line_search_step_contraction
-      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
-  if (original_options.max_num_line_search_step_size_iterations <= 0) {
-    summary->message = string("Invalid configuration: require ") +
-        string("max_num_line_search_step_size_iterations > 0.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.line_search_sufficient_curvature_decrease <=
-      original_options.line_search_sufficient_function_decrease ||
-      original_options.line_search_sufficient_curvature_decrease > 1.0) {
-    summary->message = string("Invalid configuration: require ") +
-        string("line_search_sufficient_function_decrease < ") +
-        string("line_search_sufficient_curvature_decrease < 1.0.");
-    LOG(ERROR) << summary->message;
-    return;
-  }
-  if (original_options.max_line_search_step_expansion <= 1.0) {
-    summary->message = string("Invalid configuration: require ") +
-        string("max_line_search_step_expansion > 1.0.");
+  if (!LineSearchOptionsAreValid(original_options, &summary->message)) {
    LOG(ERROR) << summary->message;
    return;
  }

@@ -805,7 +804,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
        "Terminating: Function tolerance reached. "
        "No non-constant parameter blocks found.";
    summary->termination_type = CONVERGENCE;
-    VLOG(1) << summary->message;
+    VLOG_IF(1, options.logging_type != SILENT) << summary->message;

    const double post_process_start_time = WallTimeInSeconds();
    SetSummaryFinalCost(summary);
@@ -77,37 +77,10 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double * x) {
-  switch (options_.sparse_linear_algebra_library_type) {
-    case SUITE_SPARSE:
-      return SolveImplUsingSuiteSparse(A, b, per_solve_options, x);
-    case CX_SPARSE:
-      return SolveImplUsingCXSparse(A, b, per_solve_options, x);
-    default:
-      LOG(FATAL) << "Unknown sparse linear algebra library : "
-                 << options_.sparse_linear_algebra_library_type;
-  }
-
-  LOG(FATAL) << "Unknown sparse linear algebra library : "
-             << options_.sparse_linear_algebra_library_type;
-  return LinearSolver::Summary();
-}
-
-#ifndef CERES_NO_CXSPARSE
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
-    CompressedRowSparseMatrix* A,
-    const double* b,
-    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
-  EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
-
-  LinearSolver::Summary summary;
-  summary.num_iterations = 1;
-  summary.termination_type = LINEAR_SOLVER_SUCCESS;
-  summary.message = "Success.";
-
  const int num_cols = A->num_cols();
-  Vector Atb = Vector::Zero(num_cols);
-  A->LeftMultiply(b, Atb.data());
+  VectorRef(x, num_cols).setZero();
+  A->LeftMultiply(b, x);

  if (per_solve_options.D != NULL) {
    // Temporarily append a diagonal block to the A matrix, but undo

@@ -123,10 +96,37 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
    A->AppendRows(*regularizer);
  }

-  VectorRef(x, num_cols).setZero();
+  LinearSolver::Summary summary;
+  switch (options_.sparse_linear_algebra_library_type) {
+    case SUITE_SPARSE:
+      summary = SolveImplUsingSuiteSparse(A, per_solve_options, x);
+      break;
+    case CX_SPARSE:
+      summary = SolveImplUsingCXSparse(A, per_solve_options, x);
+      break;
+    default:
+      LOG(FATAL) << "Unknown sparse linear algebra library : "
+                 << options_.sparse_linear_algebra_library_type;
+  }

-  // Wrap the augmented Jacobian in a compressed sparse column matrix.
-  cs_di At = cxsparse_.CreateSparseMatrixTransposeView(A);
+  if (per_solve_options.D != NULL) {
+    A->DeleteRows(num_cols);
+  }
+
+  return summary;
+}
+
+#ifndef CERES_NO_CXSPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+    CompressedRowSparseMatrix* A,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double * rhs_and_solution) {
+  EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";

  // Compute the normal equations. J'J delta = J'f and solve them
  // using a sparse Cholesky factorization. Notice that when compared

@@ -135,13 +135,18 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
  // factorized. CHOLMOD/SuiteSparse on the other hand can just work
  // off of Jt to compute the Cholesky factorization of the normal
  // equations.
-  cs_di* A2 = cxsparse_.TransposeMatrix(&At);
-  cs_di* AtA = cxsparse_.MatrixMatrixMultiply(&At, A2);
-
-  cxsparse_.Free(A2);
-  if (per_solve_options.D != NULL) {
-    A->DeleteRows(num_cols);
+  if (outer_product_.get() == NULL) {
+    outer_product_.reset(
+        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+            *A, &pattern_));
  }
+
+  CompressedRowSparseMatrix::ComputeOuterProduct(
+      *A, pattern_, outer_product_.get());
+  cs_di AtA_view =
+      cxsparse_.CreateSparseMatrixTransposeView(outer_product_.get());
+  cs_di* AtA = &AtA_view;

  event_logger.AddEvent("Setup");

  // Compute symbolic factorization if not available.

@@ -160,23 +165,17 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
    summary.message =
        "CXSparse failure. Unable to find symbolic factorization.";
-  } else if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
-    VectorRef(x, Atb.rows()) = Atb;
-  } else {
+  } else if (!cxsparse_.SolveCholesky(AtA, cxsparse_factor_, rhs_and_solution)) {
    summary.termination_type = LINEAR_SOLVER_FAILURE;
  }
  event_logger.AddEvent("Solve");

-  cxsparse_.Free(AtA);
  event_logger.AddEvent("Teardown");
  return summary;
}
#else
LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
    CompressedRowSparseMatrix* A,
-    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
+    double * rhs_and_solution) {
  LOG(FATAL) << "No CXSparse support in Ceres.";

  // Unreachable but MSVC does not know this.

@@ -187,9 +186,8 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
#ifndef CERES_NO_SUITESPARSE
LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
    CompressedRowSparseMatrix* A,
-    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
+    double * rhs_and_solution) {
  EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
  LinearSolver::Summary summary;
  summary.termination_type = LINEAR_SOLVER_SUCCESS;

@@ -197,24 +195,6 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
  summary.message = "Success.";

  const int num_cols = A->num_cols();
-  Vector Atb = Vector::Zero(num_cols);
-  A->LeftMultiply(b, Atb.data());
-
-  if (per_solve_options.D != NULL) {
-    // Temporarily append a diagonal block to the A matrix, but undo
-    // it before returning the matrix to the user.
-    scoped_ptr<CompressedRowSparseMatrix> regularizer;
-    if (A->col_blocks().size() > 0) {
-      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
-          per_solve_options.D, A->col_blocks()));
-    } else {
-      regularizer.reset(new CompressedRowSparseMatrix(
-          per_solve_options.D, num_cols));
-    }
-    A->AppendRows(*regularizer);
-  }
-
-  VectorRef(x, num_cols).setZero();
  cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
  event_logger.AddEvent("Setup");

@@ -231,33 +211,23 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
  event_logger.AddEvent("Analysis");

  if (factor_ == NULL) {
-    if (per_solve_options.D != NULL) {
-      A->DeleteRows(num_cols);
-    }
    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
    return summary;
  }

  summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message);
  if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
-    if (per_solve_options.D != NULL) {
-      A->DeleteRows(num_cols);
-    }
    return summary;
  }

-  cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
-  cholmod_dense* sol = ss_.Solve(factor_, rhs, &summary.message);
+  cholmod_dense* rhs = ss_.CreateDenseVector(rhs_and_solution, num_cols, num_cols);
+  cholmod_dense* solution = ss_.Solve(factor_, rhs, &summary.message);
  event_logger.AddEvent("Solve");

  ss_.Free(rhs);
-  if (per_solve_options.D != NULL) {
-    A->DeleteRows(num_cols);
-  }

-  if (sol != NULL) {
-    memcpy(x, sol->x, num_cols * sizeof(*x));
-    ss_.Free(sol);
+  if (solution != NULL) {
+    memcpy(rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
+    ss_.Free(solution);
  } else {
    summary.termination_type = LINEAR_SOLVER_FAILURE;
  }

@@ -268,9 +238,8 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
#else
LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
    CompressedRowSparseMatrix* A,
-    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
+    double * rhs_and_solution) {
  LOG(FATAL) << "No SuiteSparse support in Ceres.";

  // Unreachable but MSVC does not know this.
@@ -62,16 +62,14 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {

  LinearSolver::Summary SolveImplUsingSuiteSparse(
      CompressedRowSparseMatrix* A,
-      const double* b,
      const LinearSolver::PerSolveOptions& options,
-      double* x);
+      double* rhs_and_solution);

  // Crashes if CSparse is not installed.
  LinearSolver::Summary SolveImplUsingCXSparse(
      CompressedRowSparseMatrix* A,
-      const double* b,
      const LinearSolver::PerSolveOptions& options,
-      double* x);
+      double* rhs_and_solution);

  SuiteSparse ss_;
  // Cached factorization

@@ -80,7 +78,8 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
  CXSparse cxsparse_;
  // Cached factorization
  cs_dis* cxsparse_factor_;

+  scoped_ptr<CompressedRowSparseMatrix> outer_product_;
+  vector<int> pattern_;
  const LinearSolver::Options options_;
  CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
};