Bye-bye, SSBA!

With the new bundle adjustment based on Ceres we don't need the
SSBA library anymore. This also means we don't need the LDL
library, and libmv no longer depends on colamd either.
Sergey Sharybin
2013-02-25 20:15:42 +00:00
parent 2833994a71
commit b7af3207cc
23 changed files with 0 additions and 5272 deletions
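
For context, the Ceres-based bundler expresses bundle adjustment as auto-differentiated reprojection residuals instead of going through SSBA's V3D optimizer. The sketch below only illustrates that pattern, assuming a simple pinhole camera with unit focal length; the functor name and parameter layout are hypothetical and are not the actual libmv bundle code.

#include "ceres/ceres.h"
#include "ceres/rotation.h"

// Hypothetical reprojection residual, for illustration only.
struct ReprojectionError {
  ReprojectionError(double observed_x, double observed_y)
      : observed_x(observed_x), observed_y(observed_y) {}

  template <typename T>
  bool operator()(const T* const camera,  // angle-axis rotation (3) + translation (3)
                  const T* const point,   // 3D point (3)
                  T* residuals) const {
    T p[3];
    ceres::AngleAxisRotatePoint(camera, point, p);  // rotate the point
    p[0] += camera[3];                              // then translate it
    p[1] += camera[4];
    p[2] += camera[5];
    T xp = p[0] / p[2];  // pinhole projection, unit focal length (assumption)
    T yp = p[1] / p[2];
    residuals[0] = xp - T(observed_x);
    residuals[1] = yp - T(observed_y);
    return true;
  }

  double observed_x, observed_y;
};

// One observation would be added to a ceres::Problem roughly like this:
//   problem.AddResidualBlock(
//       new ceres::AutoDiffCostFunction<ReprojectionError, 2, 6, 3>(
//           new ReprojectionError(x, y)),
//       NULL, camera, point);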

View File

@@ -28,14 +28,11 @@
set(INC
.
../colamd/Include
third_party/ceres/include
)
set(INC_SYS
../Eigen3
third_party/ssba
third_party/ldl/Include
${PNG_INCLUDE_DIR}
${ZLIB_INCLUDE_DIRS}
)
@@ -83,9 +80,6 @@ set(SRC
third_party/gflags/gflags.cc
third_party/gflags/gflags_completions.cc
third_party/gflags/gflags_reporting.cc
third_party/ldl/Source/ldl.c
third_party/ssba/Geometry/v3d_metricbundle.cpp
third_party/ssba/Math/v3d_optimization.cpp
libmv-capi.h
libmv/base/id_generator.h
@@ -143,16 +137,8 @@ set(SRC
third_party/gflags/gflags/gflags.h
third_party/gflags/mutex.h
third_party/gflags/util.h
third_party/ldl/Include/ldl.h
third_party/msinttypes/inttypes.h
third_party/msinttypes/stdint.h
third_party/ssba/Geometry/v3d_cameramatrix.h
third_party/ssba/Geometry/v3d_distortion.h
third_party/ssba/Geometry/v3d_metricbundle.h
third_party/ssba/Math/v3d_linear.h
third_party/ssba/Math/v3d_linear_utils.h
third_party/ssba/Math/v3d_mathutilities.h
third_party/ssba/Math/v3d_optimization.h
)
if(WIN32)

View File

@@ -22,9 +22,6 @@ src += env.Glob('libmv/simple_pipeline/*.cc')
src += env.Glob('libmv/tracking/*.cc')
src += env.Glob('third_party/fast/*.c')
src += env.Glob('third_party/gflags/*.cc')
src += env.Glob('third_party/ldl/Source/*.c')
src += env.Glob('third_party/ssba/Geometry/*.cpp')
src += env.Glob('third_party/ssba/Math/*.cpp')
incs = '. ../Eigen3 third_party/ceres/include'
incs += ' ' + env['BF_PNG_INC']
@@ -41,8 +38,6 @@ else:
src += env.Glob("third_party/glog/src/*.cc") src += env.Glob("third_party/glog/src/*.cc")
incs += ' ./third_party/glog/src' incs += ' ./third_party/glog/src'
incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137] )
SConscript(['third_party/SConscript'])

View File

@@ -130,8 +130,6 @@ set(INC
set(INC_SYS
../Eigen3
third_party/ssba
third_party/ldl/Include
\${PNG_INCLUDE_DIR}
\${ZLIB_INCLUDE_DIRS}
)
@@ -241,8 +239,6 @@ else:
src += env.Glob("third_party/glog/src/*.cc") src += env.Glob("third_party/glog/src/*.cc")
incs += ' ./third_party/glog/src' incs += ' ./third_party/glog/src'
incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137] )
SConscript(['third_party/SConscript'])

View File

@@ -38,8 +38,6 @@
#include "glog/logging.h" #include "glog/logging.h"
#include "libmv/logging/logging.h" #include "libmv/logging/logging.h"
#include "Math/v3d_optimization.h"
#include "libmv/numeric/numeric.h" #include "libmv/numeric/numeric.h"
#include "libmv/tracking/esm_region_tracker.h" #include "libmv/tracking/esm_region_tracker.h"
@@ -96,7 +94,6 @@ void libmv_initLogging(const char *argv0)
google::SetCommandLineOption("v", "0"); google::SetCommandLineOption("v", "0");
google::SetCommandLineOption("stderrthreshold", "7"); google::SetCommandLineOption("stderrthreshold", "7");
google::SetCommandLineOption("minloglevel", "7"); google::SetCommandLineOption("minloglevel", "7");
V3D::optimizerVerbosenessLevel = 0;
}
void libmv_startDebugLogging(void)
@@ -105,7 +102,6 @@ void libmv_startDebugLogging(void)
google::SetCommandLineOption("v", "2"); google::SetCommandLineOption("v", "2");
google::SetCommandLineOption("stderrthreshold", "1"); google::SetCommandLineOption("stderrthreshold", "1");
google::SetCommandLineOption("minloglevel", "0"); google::SetCommandLineOption("minloglevel", "0");
V3D::optimizerVerbosenessLevel = 1;
}
void libmv_setLoggingVerbosity(int verbosity)
@@ -114,7 +110,6 @@ void libmv_setLoggingVerbosity(int verbosity)
snprintf(val, sizeof(val), "%d", verbosity);
google::SetCommandLineOption("v", val);
V3D::optimizerVerbosenessLevel = verbosity;
}
/* ************ Utility ************ */

View File

@@ -1,5 +0,0 @@
include_directories(../ufconfig)
include_directories(Include)
add_library(ldl Source/ldl.c)
LIBMV_INSTALL_THIRD_PARTY_LIB(ldl)

View File

@@ -1,39 +0,0 @@
May 31, 2007: version 2.0.0
* C-callable 64-bit version added
* ported to 64-bit MATLAB
* subdirectories added (Source/, Include/, Lib/, Demo/, Doc/, MATLAB/)
Dec 12, 2006: version 1.3.4
* minor MATLAB cleanup
Sept 11, 2006: version 1.3.1
* The ldl m-file renamed to ldlsparse, to avoid name conflict with the
new MATLAB ldl function (in MATLAB 7.3).
Apr 30, 2006: version 1.3
* requires AMD v2.0. ldlmain.c demo program modified, since AMD can now
handle jumbled matrices. Minor change to Makefile.
Aug 30, 2005:
* Makefile changed to use ../UFconfig/UFconfig.mk. License changed to
GNU LGPL.
July 4, 2005:
* user guide added. Since no changes to the code were made,
the version number (1.1) and code release date (Apr 22, 2005)
were left unchanged.
Apr. 22, 2005: LDL v1.1 released.
* No real changes were made. The code was revised so
that each routine fits on a single page in the documentation.
Dec 31, 2003: LDL v1.0 released.

View File

@@ -1,504 +0,0 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!

View File

@@ -1,104 +0,0 @@
/* ========================================================================== */
/* === ldl.h: include file for the LDL package ============================= */
/* ========================================================================== */
/* LDL Copyright (c) Timothy A Davis,
* University of Florida. All Rights Reserved. See README for the License.
*/
#include "UFconfig.h"
#ifdef LDL_LONG
#define LDL_int UF_long
#define LDL_ID UF_long_id
#define LDL_symbolic ldl_l_symbolic
#define LDL_numeric ldl_l_numeric
#define LDL_lsolve ldl_l_lsolve
#define LDL_dsolve ldl_l_dsolve
#define LDL_ltsolve ldl_l_ltsolve
#define LDL_perm ldl_l_perm
#define LDL_permt ldl_l_permt
#define LDL_valid_perm ldl_l_valid_perm
#define LDL_valid_matrix ldl_l_valid_matrix
#else
#define LDL_int int
#define LDL_ID "%d"
#define LDL_symbolic ldl_symbolic
#define LDL_numeric ldl_numeric
#define LDL_lsolve ldl_lsolve
#define LDL_dsolve ldl_dsolve
#define LDL_ltsolve ldl_ltsolve
#define LDL_perm ldl_perm
#define LDL_permt ldl_permt
#define LDL_valid_perm ldl_valid_perm
#define LDL_valid_matrix ldl_valid_matrix
#endif
/* ========================================================================== */
/* === int version ========================================================== */
/* ========================================================================== */
void ldl_symbolic (int n, int Ap [ ], int Ai [ ], int Lp [ ],
int Parent [ ], int Lnz [ ], int Flag [ ], int P [ ], int Pinv [ ]) ;
int ldl_numeric (int n, int Ap [ ], int Ai [ ], double Ax [ ],
int Lp [ ], int Parent [ ], int Lnz [ ], int Li [ ], double Lx [ ],
double D [ ], double Y [ ], int Pattern [ ], int Flag [ ],
int P [ ], int Pinv [ ]) ;
void ldl_lsolve (int n, double X [ ], int Lp [ ], int Li [ ],
double Lx [ ]) ;
void ldl_dsolve (int n, double X [ ], double D [ ]) ;
void ldl_ltsolve (int n, double X [ ], int Lp [ ], int Li [ ],
double Lx [ ]) ;
void ldl_perm (int n, double X [ ], double B [ ], int P [ ]) ;
void ldl_permt (int n, double X [ ], double B [ ], int P [ ]) ;
int ldl_valid_perm (int n, int P [ ], int Flag [ ]) ;
int ldl_valid_matrix ( int n, int Ap [ ], int Ai [ ]) ;
/* ========================================================================== */
/* === long version ========================================================= */
/* ========================================================================== */
void ldl_l_symbolic (UF_long n, UF_long Ap [ ], UF_long Ai [ ], UF_long Lp [ ],
UF_long Parent [ ], UF_long Lnz [ ], UF_long Flag [ ], UF_long P [ ],
UF_long Pinv [ ]) ;
UF_long ldl_l_numeric (UF_long n, UF_long Ap [ ], UF_long Ai [ ], double Ax [ ],
UF_long Lp [ ], UF_long Parent [ ], UF_long Lnz [ ], UF_long Li [ ],
double Lx [ ], double D [ ], double Y [ ], UF_long Pattern [ ],
UF_long Flag [ ], UF_long P [ ], UF_long Pinv [ ]) ;
void ldl_l_lsolve (UF_long n, double X [ ], UF_long Lp [ ], UF_long Li [ ],
double Lx [ ]) ;
void ldl_l_dsolve (UF_long n, double X [ ], double D [ ]) ;
void ldl_l_ltsolve (UF_long n, double X [ ], UF_long Lp [ ], UF_long Li [ ],
double Lx [ ]) ;
void ldl_l_perm (UF_long n, double X [ ], double B [ ], UF_long P [ ]) ;
void ldl_l_permt (UF_long n, double X [ ], double B [ ], UF_long P [ ]) ;
UF_long ldl_l_valid_perm (UF_long n, UF_long P [ ], UF_long Flag [ ]) ;
UF_long ldl_l_valid_matrix ( UF_long n, UF_long Ap [ ], UF_long Ai [ ]) ;
/* ========================================================================== */
/* === LDL version ========================================================== */
/* ========================================================================== */
#define LDL_DATE "Nov 1, 2007"
#define LDL_VERSION_CODE(main,sub) ((main) * 1000 + (sub))
#define LDL_MAIN_VERSION 2
#define LDL_SUB_VERSION 0
#define LDL_SUBSUB_VERSION 1
#define LDL_VERSION LDL_VERSION_CODE(LDL_MAIN_VERSION,LDL_SUB_VERSION)

View File

@@ -1,10 +0,0 @@
Project: LDL
URL: http://www.cise.ufl.edu/research/sparse/ldl/
License: LGPL2.1
Upstream version: 2.0.1 (despite the ChangeLog saying 2.0.0)
Local modifications:
* Deleted everything except ldl.c, ldl.h, the license, the ChangeLog, and the
README.

View File

@@ -1,136 +0,0 @@
LDL Version 2.0: a sparse LDL' factorization and solve package.
Written in C, with both a C and MATLAB mexFunction interface.
These routines are not terrifically fast (they do not use dense matrix kernels),
but the code is very short and concise. The purpose is to illustrate the
algorithms in a very concise and readable manner, primarily for educational
purposes. Although the code is very concise, this package is slightly faster
than the built-in sparse Cholesky factorization in MATLAB 6.5 (chol), when
using the same input permutation.
Requires UFconfig, in the ../UFconfig directory relative to this directory.
Quick start (Unix, or Windows with Cygwin):
To compile, test, and install LDL, you may wish to first obtain a copy of
AMD v2.0 from http://www.cise.ufl.edu/research/sparse, and place it in the
../AMD directory, relative to this directory. Next, type "make", which
will compile the LDL library and three demo main programs (one of which
requires AMD). It will also compile the LDL MATLAB mexFunction (if you
have MATLAB). Typing "make clean" will remove non-essential files.
AMD v2.0 or later is required. Its use is optional.
Quick start (for MATLAB users);
To compile, test, and install the LDL mexFunctions (ldlsparse and
ldlsymbol), start MATLAB in this directory and type ldl_install.
This works on any system supported by MATLAB.
--------------------------------------------------------------------------------
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
LDL License:
Your use or distribution of LDL or any modified version of
LDL implies that you agree to this License.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
Permission is hereby granted to use or copy this program under the
terms of the GNU LGPL, provided that the Copyright, this License,
and the Availability of the original version is retained on all copies.
User documentation of any code that uses this code or any modified
version of this code must cite the Copyright, this License, the
Availability note, and "Used by permission." Permission to modify
the code and to distribute modified code is granted, provided the
Copyright, this License, and the Availability note are retained,
and a notice that the code was modified is included.
Availability:
http://www.cise.ufl.edu/research/sparse/ldl
Acknowledgements:
This work was supported by the National Science Foundation, under
grant CCR-0203270.
Portions of this work were done while on sabbatical at Stanford University
and Lawrence Berkeley National Laboratory (with funding from the SciDAC
program). I would like to thank Gene Golub, Esmond Ng, and Horst Simon
for making this sabbatical possible. I would like to thank Pete Stewart
for his comments on a draft of this software and paper.
--------------------------------------------------------------------------------
Files and directories in this distribution:
--------------------------------------------------------------------------------
Documentation, and compiling:
README.txt this file
Makefile for compiling LDL
ChangeLog changes since V1.0 (Dec 31, 2003)
License license
lesser.txt the GNU LGPL license
ldl_userguide.pdf user guide in PDF
ldl_userguide.ps user guide in postscript
ldl_userguide.tex user guide in Latex
ldl.bib bibliography for user guide
The LDL library itself:
ldl.c the C-callable routines
ldl.h include file for any code that calls LDL
A simple C main program that demonstrates how to use LDL:
ldlsimple.c a stand-alone C program, uses the basic features of LDL
ldlsimple.out output of ldlsimple
ldllsimple.c long integer version of ldlsimple.c
Demo C program, for testing LDL and providing an example of its use
ldlmain.c a stand-alone C main program that uses and tests LDL
Matrix a directory containing matrices used by ldlmain.c
ldlmain.out output of ldlmain
ldlamd.out output of ldlamd (ldlmain.c compiled with AMD)
ldllamd.out output of ldllamd (ldlmain.c compiled with AMD, long)
MATLAB-related, not required for use in a regular C program
Contents.m a list of the MATLAB-callable routines
ldl.m MATLAB help file for the LDL mexFunction
ldldemo.m MATLAB demo of how to use the LDL mexFunction
ldldemo.out diary output of ldldemo
ldltest.m to test the LDL mexFunction
ldltest.out diary output of ldltest
ldlmex.c the LDL mexFunction for MATLAB
ldlrow.m the numerical algorithm that LDL is based on
ldlmain2.m compiles and runs ldlmain.c as a MATLAB mexFunction
ldlmain2.out output of ldlmain2.m
ldlsymbolmex.c symbolic factorization using LDL (see SYMBFACT, ETREE)
ldlsymbol.m help file for the LDLSYMBOL mexFunction
ldl_install.m compile, install, and test LDL functions
ldl_make.m compile LDL (ldlsparse and ldlsymbol)
ldlsparse.m help for ldlsparse
See ldl.c for a description of how to use the code from a C program. Type
"help ldl" in MATLAB for information on how to use LDL in a MATLAB program.

View File

@@ -1,507 +0,0 @@
/* ========================================================================== */
/* === ldl.c: sparse LDL' factorization and solve package =================== */
/* ========================================================================== */
/* LDL: a simple set of routines for sparse LDL' factorization. These routines
* are not terrifically fast (they do not use dense matrix kernels), but the
* code is very short. The purpose is to illustrate the algorithms in a very
* concise manner, primarily for educational purposes. Although the code is
* very concise, this package is slightly faster than the built-in sparse
* Cholesky factorization in MATLAB 7.0 (chol), when using the same input
* permutation.
*
* The routines compute the LDL' factorization of a real sparse symmetric
* matrix A (or PAP' if a permutation P is supplied), and solve upper
* and lower triangular systems with the resulting L and D factors. If A is
* positive definite then the factorization will be accurate. A can be
* indefinite (with negative values on the diagonal D), but in this case no
* guarantee of accuracy is provided, since no numeric pivoting is performed.
*
* The n-by-n sparse matrix A is in compressed-column form. The nonzero values
* in column j are stored in Ax [Ap [j] ... Ap [j+1]-1], with corresponding row
* indices in Ai [Ap [j] ... Ap [j+1]-1]. Ap [0] = 0 is required, and thus
* nz = Ap [n] is the number of nonzeros in A. Ap is an int array of size n+1.
* The int array Ai and the double array Ax are of size nz. This data structure
* is identical to the one used by MATLAB, except for the following
* generalizations. The row indices in each column of A need not be in any
* particular order, although they must be in the range 0 to n-1. Duplicate
* entries can be present; any duplicates are summed. That is, if row index i
* appears twice in a column j, then the value of A (i,j) is the sum of the two
* entries. The data structure used here for the input matrix A is more
* flexible than MATLAB's, which requires sorted columns with no duplicate
* entries.
*
* Only the diagonal and upper triangular part of A (or PAP' if a permutation
* P is provided) is accessed. The lower triangular parts of the matrix A or
* PAP' can be present, but they are ignored.
*
* The optional input permutation is provided as an array P of length n. If
* P [k] = j, the row and column j of A is the kth row and column of PAP'.
* If P is present then the factorization is LDL' = PAP' or L*D*L' = A(P,P) in
* 0-based MATLAB notation. If P is not present (a null pointer) then no
* permutation is performed, and the factorization is LDL' = A.
*
* The lower triangular matrix L is stored in the same compressed-column
* form (an int Lp array of size n+1, an int Li array of size Lp [n], and a
* double array Lx of the same size as Li). It has a unit diagonal, which is
* not stored. The row indices in each column of L are always returned in
* ascending order, with no duplicate entries. This format is compatible with
* MATLAB, except that it would be more convenient for MATLAB to include the
* unit diagonal of L. Doing so here would add additional complexity to the
* code, and is thus omitted in the interest of keeping this code short and
* readable.
*
* The elimination tree is held in the Parent [0..n-1] array. It is normally
* not required by the user, but it is required by ldl_numeric. The diagonal
* matrix D is held as an array D [0..n-1] of size n.
*
* --------------------
* C-callable routines:
* --------------------
*
* ldl_symbolic: Given the pattern of A, computes the Lp and Parent arrays
* required by ldl_numeric. Takes time proportional to the number of
* nonzeros in L. Computes the inverse Pinv of P if P is provided.
* Also returns Lnz, the count of nonzeros in each column of L below
* the diagonal (this is not required by ldl_numeric).
* ldl_numeric: Given the pattern and numerical values of A, the Lp array,
* the Parent array, and P and Pinv if applicable, computes the
* pattern and numerical values of L and D.
* ldl_lsolve: Solves Lx=b for a dense vector b.
* ldl_dsolve: Solves Dx=b for a dense vector b.
* ldl_ltsolve: Solves L'x=b for a dense vector b.
* ldl_perm: Computes x=Pb for a dense vector b.
* ldl_permt: Computes x=P'b for a dense vector b.
* ldl_valid_perm: checks the validity of a permutation vector
* ldl_valid_matrix: checks the validity of the sparse matrix A
*
* ----------------------------
* Limitations of this package:
* ----------------------------
*
* In the interest of keeping this code simple and readable, ldl_symbolic and
* ldl_numeric assume their inputs are valid. You can check your own inputs
* prior to calling these routines with the ldl_valid_perm and ldl_valid_matrix
* routines. Except for the two ldl_valid_* routines, no routine checks to see
* if the array arguments are present (non-NULL). Like all C routines, no
* routine can determine if the arrays are long enough and don't overlap.
*
* The ldl_numeric does check the numerical factorization, however. It returns
* n if the factorization is successful. If D (k,k) is zero, then k is
* returned, and L is only partially computed.
*
* No pivoting to control fill-in is performed, which is often critical for
* obtaining good performance. I recommend that you compute the permutation P
* using AMD or SYMAMD (approximate minimum degree ordering routines), or an
* appropriate graph-partitioning based ordering. See the ldldemo.m routine for
* an example in MATLAB, and the ldlmain.c stand-alone C program for examples of
* how to find P. Routines for manipulating compressed-column matrices are
* available in UMFPACK. AMD, SYMAMD, UMFPACK, and this LDL package are all
* available at http://www.cise.ufl.edu/research/sparse.
*
* -------------------------
* Possible simplifications:
* -------------------------
*
* These routines could be made even simpler with a few additional assumptions.
* If no input permutation were performed, the caller would have to permute the
* matrix first, but the computation of Pinv, and the use of P and Pinv could be
* removed. If only the diagonal and upper triangular part of A or PAP' are
* present, then the tests in the "if (i < k)" statement in ldl_symbolic and
* "if (i <= k)" in ldl_numeric, are always true, and could be removed (i can
* equal k in ldl_symbolic, but then the body of the if statement would
* correctly do no work since Flag [k] == k). If we could assume that no
* duplicate entries are present, then the statement Y [i] += Ax [p] could be
* replaced with Y [i] = Ax [p] in ldl_numeric.
*
* --------------------------
* Description of the method:
* --------------------------
*
* LDL computes the symbolic factorization by finding the pattern of L one row
* at a time. It does this based on the following theory. Consider a sparse
* system Lx=b, where L, x, and b, are all sparse, and where L comes from a
* Cholesky (or LDL') factorization. The elimination tree (etree) of L is
* defined as follows. The parent of node j is the smallest k > j such that
* L (k,j) is nonzero. Node j has no parent if column j of L is completely zero
* below the diagonal (j is a root of the etree in this case). The nonzero
* pattern of x is the union of the paths from each node i to the root, for
* each nonzero b (i). To compute the numerical solution to Lx=b, we can
* traverse the columns of L corresponding to nonzero values of x. This
* traversal does not need to be done in the order 0 to n-1. It can be done in
* any "topological" order, such that x (i) is computed before x (j) if i is a
* descendant of j in the elimination tree.
*
* The row-form of the LDL' factorization is shown in the MATLAB function
* ldlrow.m in this LDL package. Note that row k of L is found via a sparse
* triangular solve of L (1:k-1, 1:k-1) \ A (1:k-1, k), to use 1-based MATLAB
* notation. Thus, we can start with the nonzero pattern of the kth column of
* A (above the diagonal), follow the paths up to the root of the etree of the
* (k-1)-by-(k-1) leading submatrix of L, and obtain the pattern of the kth row
* of L. Note that we only need the leading (k-1)-by-(k-1) submatrix of L to
* do this. The elimination tree can be constructed as we go.
*
* The symbolic factorization does the same thing, except that it discards the
* pattern of L as it is computed. It simply counts the number of nonzeros in
* each column of L and then constructs the Lp index array when it's done. The
* symbolic factorization does not need to do this in topological order.
* Compare ldl_symbolic with the first part of ldl_numeric, and note that the
* while (len > 0) loop is not present in ldl_symbolic.
*
* LDL Version 1.3, Copyright (c) 2006 by Timothy A Davis,
* University of Florida. All Rights Reserved. Developed while on sabbatical
* at Stanford University and Lawrence Berkeley National Laboratory. Refer to
* the README file for the License. Available at
* http://www.cise.ufl.edu/research/sparse.
*/
#include "ldl.h"
/* ========================================================================== */
/* === ldl_symbolic ========================================================= */
/* ========================================================================== */
/* The input to this routine is a sparse matrix A, stored in column form, and
* an optional permutation P. The output is the elimination tree
* and the number of nonzeros in each column of L. Parent [i] = k if k is the
* parent of i in the tree. The Parent array is required by ldl_numeric.
* Lnz [k] gives the number of nonzeros in the kth column of L, excluding the
* diagonal.
*
* One workspace vector (Flag) of size n is required.
*
* If P is NULL, then it is ignored. The factorization will be LDL' = A.
* Pinv is not computed. In this case, neither P nor Pinv are required by
* ldl_numeric.
*
* If P is not NULL, then it is assumed to be a valid permutation. If
* row and column j of A is the kth pivot, the P [k] = j. The factorization
* will be LDL' = PAP', or A (p,p) in MATLAB notation. The inverse permutation
* Pinv is computed, where Pinv [j] = k if P [k] = j. In this case, both P
* and Pinv are required as inputs to ldl_numeric.
*
* The floating-point operation count of the subsequent call to ldl_numeric
* is not returned, but could be computed after ldl_symbolic is done. It is
* the sum of (Lnz [k]) * (Lnz [k] + 2) for k = 0 to n-1.
*/
void LDL_symbolic
(
LDL_int n, /* A and L are n-by-n, where n >= 0 */
LDL_int Ap [ ], /* input of size n+1, not modified */
LDL_int Ai [ ], /* input of size nz=Ap[n], not modified */
LDL_int Lp [ ], /* output of size n+1, not defined on input */
LDL_int Parent [ ], /* output of size n, not defined on input */
LDL_int Lnz [ ], /* output of size n, not defined on input */
LDL_int Flag [ ], /* workspace of size n, not defn. on input or output */
LDL_int P [ ], /* optional input of size n */
LDL_int Pinv [ ] /* optional output of size n (used if P is not NULL) */
)
{
LDL_int i, k, p, kk, p2 ;
if (P)
{
/* If P is present then compute Pinv, the inverse of P */
for (k = 0 ; k < n ; k++)
{
Pinv [P [k]] = k ;
}
}
for (k = 0 ; k < n ; k++)
{
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
Parent [k] = -1 ; /* parent of k is not yet known */
Flag [k] = k ; /* mark node k as visited */
Lnz [k] = 0 ; /* count of nonzeros in column k of L */
kk = (P) ? (P [k]) : (k) ; /* kth original, or permuted, column */
p2 = Ap [kk+1] ;
for (p = Ap [kk] ; p < p2 ; p++)
{
/* A (i,k) is nonzero (original or permuted A) */
i = (Pinv) ? (Pinv [Ai [p]]) : (Ai [p]) ;
if (i < k)
{
/* follow path from i to root of etree, stop at flagged node */
for ( ; Flag [i] != k ; i = Parent [i])
{
/* find parent of i if not yet determined */
if (Parent [i] == -1) Parent [i] = k ;
Lnz [i]++ ; /* L (k,i) is nonzero */
Flag [i] = k ; /* mark i as visited */
}
}
}
}
/* construct Lp index array from Lnz column counts */
Lp [0] = 0 ;
for (k = 0 ; k < n ; k++)
{
Lp [k+1] = Lp [k] + Lnz [k] ;
}
}
/* ========================================================================== */
/* === ldl_numeric ========================================================== */
/* ========================================================================== */
/* Given a sparse matrix A (the arguments n, Ap, Ai, and Ax) and its symbolic
* analysis (Lp and Parent, and optionally P and Pinv), compute the numeric LDL'
* factorization of A or PAP'. The outputs of this routine are arguments Li,
* Lx, and D. It also requires three size-n workspaces (Y, Pattern, and Flag).
*/
LDL_int LDL_numeric /* returns n if successful, k if D (k,k) is zero */
(
LDL_int n, /* A and L are n-by-n, where n >= 0 */
LDL_int Ap [ ], /* input of size n+1, not modified */
LDL_int Ai [ ], /* input of size nz=Ap[n], not modified */
double Ax [ ], /* input of size nz=Ap[n], not modified */
LDL_int Lp [ ], /* input of size n+1, not modified */
LDL_int Parent [ ], /* input of size n, not modified */
LDL_int Lnz [ ], /* output of size n, not defn. on input */
LDL_int Li [ ], /* output of size lnz=Lp[n], not defined on input */
double Lx [ ], /* output of size lnz=Lp[n], not defined on input */
double D [ ], /* output of size n, not defined on input */
double Y [ ], /* workspace of size n, not defn. on input or output */
LDL_int Pattern [ ],/* workspace of size n, not defn. on input or output */
LDL_int Flag [ ], /* workspace of size n, not defn. on input or output */
LDL_int P [ ], /* optional input of size n */
LDL_int Pinv [ ] /* optional input of size n */
)
{
double yi, l_ki ;
LDL_int i, k, p, kk, p2, len, top ;
for (k = 0 ; k < n ; k++)
{
/* compute nonzero Pattern of kth row of L, in topological order */
Y [k] = 0.0 ; /* Y(0:k) is now all zero */
top = n ; /* stack for pattern is empty */
Flag [k] = k ; /* mark node k as visited */
Lnz [k] = 0 ; /* count of nonzeros in column k of L */
kk = (P) ? (P [k]) : (k) ; /* kth original, or permuted, column */
p2 = Ap [kk+1] ;
for (p = Ap [kk] ; p < p2 ; p++)
{
i = (Pinv) ? (Pinv [Ai [p]]) : (Ai [p]) ; /* get A(i,k) */
if (i <= k)
{
Y [i] += Ax [p] ; /* scatter A(i,k) into Y (sum duplicates) */
for (len = 0 ; Flag [i] != k ; i = Parent [i])
{
Pattern [len++] = i ; /* L(k,i) is nonzero */
Flag [i] = k ; /* mark i as visited */
}
while (len > 0) Pattern [--top] = Pattern [--len] ;
}
}
/* compute numerical values kth row of L (a sparse triangular solve) */
D [k] = Y [k] ; /* get D(k,k) and clear Y(k) */
Y [k] = 0.0 ;
for ( ; top < n ; top++)
{
i = Pattern [top] ; /* Pattern [top:n-1] is pattern of L(:,k) */
yi = Y [i] ; /* get and clear Y(i) */
Y [i] = 0.0 ;
p2 = Lp [i] + Lnz [i] ;
for (p = Lp [i] ; p < p2 ; p++)
{
Y [Li [p]] -= Lx [p] * yi ;
}
l_ki = yi / D [i] ; /* the nonzero entry L(k,i) */
D [k] -= l_ki * yi ;
Li [p] = k ; /* store L(k,i) in column form of L */
Lx [p] = l_ki ;
Lnz [i]++ ; /* increment count of nonzeros in col i */
}
if (D [k] == 0.0) return (k) ; /* failure, D(k,k) is zero */
}
return (n) ; /* success, diagonal of D is all nonzero */
}
/* ========================================================================== */
/* === ldl_lsolve: solve Lx=b ============================================== */
/* ========================================================================== */
void LDL_lsolve
(
LDL_int n, /* L is n-by-n, where n >= 0 */
double X [ ], /* size n. right-hand-side on input, soln. on output */
LDL_int Lp [ ], /* input of size n+1, not modified */
LDL_int Li [ ], /* input of size lnz=Lp[n], not modified */
double Lx [ ] /* input of size lnz=Lp[n], not modified */
)
{
LDL_int j, p, p2 ;
for (j = 0 ; j < n ; j++)
{
p2 = Lp [j+1] ;
for (p = Lp [j] ; p < p2 ; p++)
{
X [Li [p]] -= Lx [p] * X [j] ;
}
}
}
/* ========================================================================== */
/* === ldl_dsolve: solve Dx=b ============================================== */
/* ========================================================================== */
void LDL_dsolve
(
LDL_int n, /* D is n-by-n, where n >= 0 */
double X [ ], /* size n. right-hand-side on input, soln. on output */
double D [ ] /* input of size n, not modified */
)
{
LDL_int j ;
for (j = 0 ; j < n ; j++)
{
X [j] /= D [j] ;
}
}
/* ========================================================================== */
/* === ldl_ltsolve: solve L'x=b ============================================ */
/* ========================================================================== */
void LDL_ltsolve
(
LDL_int n, /* L is n-by-n, where n >= 0 */
double X [ ], /* size n. right-hand-side on input, soln. on output */
LDL_int Lp [ ], /* input of size n+1, not modified */
LDL_int Li [ ], /* input of size lnz=Lp[n], not modified */
double Lx [ ] /* input of size lnz=Lp[n], not modified */
)
{
LDL_int j, p, p2 ;
for (j = n-1 ; j >= 0 ; j--)
{
p2 = Lp [j+1] ;
for (p = Lp [j] ; p < p2 ; p++)
{
X [j] -= Lx [p] * X [Li [p]] ;
}
}
}
/* ========================================================================== */
/* === ldl_perm: permute a vector, x=Pb ===================================== */
/* ========================================================================== */
void LDL_perm
(
LDL_int n, /* size of X, B, and P */
double X [ ], /* output of size n. */
double B [ ], /* input of size n. */
LDL_int P [ ] /* input permutation array of size n. */
)
{
LDL_int j ;
for (j = 0 ; j < n ; j++)
{
X [j] = B [P [j]] ;
}
}
/* ========================================================================== */
/* === ldl_permt: permute a vector, x=P'b =================================== */
/* ========================================================================== */
void LDL_permt
(
LDL_int n, /* size of X, B, and P */
double X [ ], /* output of size n. */
double B [ ], /* input of size n. */
LDL_int P [ ] /* input permutation array of size n. */
)
{
LDL_int j ;
for (j = 0 ; j < n ; j++)
{
X [P [j]] = B [j] ;
}
}
/* ========================================================================== */
/* === ldl_valid_perm: check if a permutation vector is valid =============== */
/* ========================================================================== */
LDL_int LDL_valid_perm /* returns 1 if valid, 0 otherwise */
(
LDL_int n,
LDL_int P [ ], /* input of size n, a permutation of 0:n-1 */
LDL_int Flag [ ] /* workspace of size n */
)
{
LDL_int j, k ;
if (n < 0 || !Flag)
{
return (0) ; /* n must be >= 0, and Flag must be present */
}
if (!P)
{
return (1) ; /* If NULL, P is assumed to be the identity perm. */
}
for (j = 0 ; j < n ; j++)
{
Flag [j] = 0 ; /* clear the Flag array */
}
for (k = 0 ; k < n ; k++)
{
j = P [k] ;
if (j < 0 || j >= n || Flag [j] != 0)
{
return (0) ; /* P is not valid */
}
Flag [j] = 1 ;
}
return (1) ; /* P is valid */
}
/* ========================================================================== */
/* === ldl_valid_matrix: check if a sparse matrix is valid ================== */
/* ========================================================================== */
/* This routine checks to see if a sparse matrix A is valid for input to
* ldl_symbolic and ldl_numeric. It returns 1 if the matrix is valid, 0
* otherwise. A is in sparse column form. The numerical values in column j
* are stored in Ax [Ap [j] ... Ap [j+1]-1], with row indices in
* Ai [Ap [j] ... Ap [j+1]-1]. The Ax array is not checked.
*/
LDL_int LDL_valid_matrix
(
LDL_int n,
LDL_int Ap [ ],
LDL_int Ai [ ]
)
{
LDL_int j, p ;
if (n < 0 || !Ap || !Ai || Ap [0] != 0)
{
return (0) ; /* n must be >= 0, and Ap and Ai must be present */
}
for (j = 0 ; j < n ; j++)
{
if (Ap [j] > Ap [j+1])
{
return (0) ; /* Ap must be monotonically nondecreasing */
}
}
for (p = 0 ; p < Ap [n] ; p++)
{
if (Ai [p] < 0 || Ai [p] >= n)
{
return (0) ; /* row indices must be in the range 0 to n-1 */
}
}
return (1) ; /* matrix is valid */
}
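
A hedged, self-contained sketch of the usual call sequence for the routines above (symbolic analysis, numeric factorization, then the L, D and L' solves). The LDL_symbolic definition is not part of this hunk, so its argument order is assumed from the loop body shown above; the 2-by-2 matrix, the "ldl.h" include and the variable names are purely illustrative.

/* Hedged usage sketch -- not part of the library. Assumes LDL_symbolic takes
 * (n, Ap, Ai, Lp, Parent, Lnz, Flag, P, Pinv); only the upper triangle of A
 * is supplied, as required by the i < k / i <= k tests above. */
#include <cstdio>
#include "ldl.h"   /* assumed header exposing the LDL_* routines above */

int main()
{
    /* A = [4 1; 1 3], upper triangle in compressed column form */
    LDL_int n = 2;
    LDL_int Ap[] = { 0, 1, 3 };
    LDL_int Ai[] = { 0, 0, 1 };
    double  Ax[] = { 4.0, 1.0, 3.0 };
    double  b[]  = { 1.0, 2.0 };             /* right-hand side, solved in place */

    LDL_int Lp[3], Parent[2], Lnz[2], Flag[2], Pattern[2];
    double  Y[2], D[2];
    LDL_int Li[4];                           /* lnz = Lp[n] = 1 here; oversized */
    double  Lx[4];

    LDL_symbolic(n, Ap, Ai, Lp, Parent, Lnz, Flag, NULL, NULL);  /* assumed order */
    LDL_int k = LDL_numeric(n, Ap, Ai, Ax, Lp, Parent, Lnz, Li, Lx, D,
                            Y, Pattern, Flag, NULL, NULL);
    if (k != n) { std::printf("D(%d,%d) is zero\n", (int) k, (int) k); return 1; }

    LDL_lsolve(n, b, Lp, Li, Lx);   /* L y = b  */
    LDL_dsolve(n, b, D);            /* D z = y  */
    LDL_ltsolve(n, b, Lp, Li, Lx);  /* L' x = z */
    std::printf("x = [%g %g]\n", b[0], b[1]);
    return 0;
}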

View File

@@ -1,165 +0,0 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@@ -1,204 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_CAMERA_MATRIX_H
#define V3D_CAMERA_MATRIX_H
#include "Math/v3d_linear.h"
#include "Geometry/v3d_distortion.h"
namespace V3D
{
struct CameraMatrix
{
CameraMatrix()
{
makeIdentityMatrix(_K);
makeIdentityMatrix(_R);
makeZeroVector(_T);
this->updateCachedValues(true, true);
}
CameraMatrix(double f, double cx, double cy)
{
makeIdentityMatrix(_K);
_K[0][0] = f;
_K[1][1] = f;
_K[0][2] = cx;
_K[1][2] = cy;
makeIdentityMatrix(_R);
makeZeroVector(_T);
this->updateCachedValues(true, true);
}
CameraMatrix(Matrix3x3d const& K,
Matrix3x3d const& R,
Vector3d const& T)
: _K(K), _R(R), _T(T)
{
this->updateCachedValues(true, true);
}
void setIntrinsic(Matrix3x3d const& K) { _K = K; this->updateCachedValues(true, false); }
void setRotation(Matrix3x3d const& R) { _R = R; this->updateCachedValues(false, true); }
void setTranslation(Vector3d const& T) { _T = T; this->updateCachedValues(false, true); }
template <typename Mat>
void setOrientation(Mat const& RT)
{
_R[0][0] = RT[0][0]; _R[0][1] = RT[0][1]; _R[0][2] = RT[0][2];
_R[1][0] = RT[1][0]; _R[1][1] = RT[1][1]; _R[1][2] = RT[1][2];
_R[2][0] = RT[2][0]; _R[2][1] = RT[2][1]; _R[2][2] = RT[2][2];
_T[0] = RT[0][3]; _T[1] = RT[1][3]; _T[2] = RT[2][3];
this->updateCachedValues(false, true);
}
Matrix3x3d const& getIntrinsic() const { return _K; }
Matrix3x3d const& getRotation() const { return _R; }
Vector3d const& getTranslation() const { return _T; }
Matrix3x4d getOrientation() const
{
Matrix3x4d RT;
RT[0][0] = _R[0][0]; RT[0][1] = _R[0][1]; RT[0][2] = _R[0][2];
RT[1][0] = _R[1][0]; RT[1][1] = _R[1][1]; RT[1][2] = _R[1][2];
RT[2][0] = _R[2][0]; RT[2][1] = _R[2][1]; RT[2][2] = _R[2][2];
RT[0][3] = _T[0]; RT[1][3] = _T[1]; RT[2][3] = _T[2];
return RT;
}
Matrix3x4d getProjection() const
{
Matrix3x4d const RT = this->getOrientation();
return _K * RT;
}
double getFocalLength() const { return _K[0][0]; }
double getAspectRatio() const { return _K[1][1] / _K[0][0]; }
Vector2d getPrincipalPoint() const
{
Vector2d pp;
pp[0] = _K[0][2];
pp[1] = _K[1][2];
return pp;
}
Vector2d projectPoint(Vector3d const& X) const
{
Vector3d q = _K*(_R*X + _T);
Vector2d res;
res[0] = q[0]/q[2]; res[1] = q[1]/q[2];
return res;
}
template <typename Distortion>
Vector2d projectPoint(Distortion const& distortion, Vector3d const& X) const
{
Vector3d XX = _R*X + _T;
Vector2d p;
p[0] = XX[0] / XX[2];
p[1] = XX[1] / XX[2];
p = distortion(p);
Vector2d res;
res[0] = _K[0][0] * p[0] + _K[0][1] * p[1] + _K[0][2];
res[1] = _K[1][1] * p[1] + _K[1][2];
return res;
}
Vector3d unprojectPixel(Vector2d const &p, double depth = 1) const
{
Vector3d pp;
pp[0] = p[0]; pp[1] = p[1]; pp[2] = 1.0;
Vector3d ray = _invK * pp;
ray[0] *= depth/ray[2];
ray[1] *= depth/ray[2];
ray[2] = depth;
ray = _Rt * ray;
return _center + ray;
}
Vector3d transformPointIntoCameraSpace(Vector3d const& p) const
{
return _R*p + _T;
}
Vector3d transformPointFromCameraSpace(Vector3d const& p) const
{
return _Rt*(p-_T);
}
Vector3d transformDirectionFromCameraSpace(Vector3d const& dir) const
{
return _Rt*dir;
}
Vector3d const& cameraCenter() const
{
return _center;
}
Vector3d opticalAxis() const
{
return this->transformDirectionFromCameraSpace(makeVector3(0.0, 0.0, 1.0));
}
Vector3d upVector() const
{
return this->transformDirectionFromCameraSpace(makeVector3(0.0, 1.0, 0.0));
}
Vector3d rightVector() const
{
return this->transformDirectionFromCameraSpace(makeVector3(1.0, 0.0, 0.0));
}
Vector3d getRay(Vector2d const& p) const
{
Vector3d pp = makeVector3(p[0], p[1], 1.0);
Vector3d ray = _invK * pp;
ray = _Rt * ray;
normalizeVector(ray);
return ray;
}
protected:
void updateCachedValues(bool intrinsic, bool orientation)
{
if (intrinsic) _invK = invertedMatrix(_K);
if (orientation)
{
makeTransposedMatrix(_R, _Rt);
_center = _Rt * (-1.0 * _T);
}
}
Matrix3x3d _K, _R;
Vector3d _T;
Matrix3x3d _invK, _Rt;
Vector3d _center;
}; // end struct CameraMatrix
} // end namespace V3D
#endif
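
For orientation, a hedged sketch of the projection convention implemented by projectPoint above (q = K (R X + T), then division by the depth q[2]). The include path and the availability of makeVector3 are assumptions; makeVector3 is only used, not defined, in this header.

// Hedged usage sketch -- values are arbitrary and the include path is assumed.
#include <cstdio>
#include "Geometry/v3d_cameramatrix.h"

int main()
{
    using namespace V3D;
    CameraMatrix cam(1000.0, 320.0, 240.0);          // f = 1000, principal point (320, 240)
    cam.setTranslation(makeVector3(0.0, 0.0, 5.0));  // R stays at identity

    Vector3d X = makeVector3(0.1, -0.2, 0.0);        // world point
    Vector2d p = cam.projectPoint(X);                // K*(R*X + T), then divide by depth
    std::printf("pixel: (%g, %g)\n", p[0], p[1]);
    return 0;
}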

View File

@@ -1,97 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_DISTORTION_H
#define V3D_DISTORTION_H
#include "Math/v3d_linear.h"
#include "Math/v3d_linear_utils.h"
namespace V3D
{
struct StdDistortionFunction
{
double k1, k2, p1, p2;
StdDistortionFunction()
: k1(0), k2(0), p1(0), p2(0)
{ }
Vector2d operator()(Vector2d const& xu) const
{
double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
double const r4 = r2*r2;
double const kr = 1 + k1*r2 + k2*r4;
Vector2d xd;
xd[0] = kr * xu[0] + 2*p1*xu[0]*xu[1] + p2*(r2 + 2*xu[0]*xu[0]);
xd[1] = kr * xu[1] + 2*p2*xu[0]*xu[1] + p1*(r2 + 2*xu[1]*xu[1]);
return xd;
}
Matrix2x2d derivativeWrtRadialParameters(Vector2d const& xu) const
{
double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
double const r4 = r2*r2;
//double const kr = 1 + k1*r2 + k2*r4;
Matrix2x2d deriv;
deriv[0][0] = xu[0] * r2; // d xd/d k1
deriv[0][1] = xu[0] * r4; // d xd/d k2
deriv[1][0] = xu[1] * r2; // d yd/d k1
deriv[1][1] = xu[1] * r4; // d yd/d k2
return deriv;
}
Matrix2x2d derivativeWrtTangentialParameters(Vector2d const& xu) const
{
double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
//double const r4 = r2*r2;
//double const kr = 1 + k1*r2 + k2*r4;
Matrix2x2d deriv;
deriv[0][0] = 2*xu[0]*xu[1]; // d xd/d p1
deriv[0][1] = r2 + 2*xu[0]*xu[0]; // d xd/d p2
deriv[1][0] = r2 + 2*xu[1]*xu[1]; // d yd/d p1
deriv[1][1] = deriv[0][0]; // d yd/d p2
return deriv;
}
Matrix2x2d derivativeWrtUndistortedPoint(Vector2d const& xu) const
{
double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
double const r4 = r2*r2;
double const kr = 1 + k1*r2 + k2*r4;
double const dkr = 2*k1 + 4*k2*r2;
Matrix2x2d deriv;
deriv[0][0] = kr + xu[0] * xu[0] * dkr + 2*p1*xu[1] + 6*p2*xu[0]; // d xd/d xu
deriv[0][1] = xu[0] * xu[1] * dkr + 2*p1*xu[0] + 2*p2*xu[1]; // d xd/d yu
deriv[1][0] = deriv[0][1]; // d yd/d xu
deriv[1][1] = kr + xu[1] * xu[1] * dkr + 6*p1*xu[1] + 2*p2*xu[0]; // d yd/d yu
return deriv;
}
}; // end struct StdDistortionFunction
} // end namespace V3D
#endif
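
operator() above is the usual radial/tangential (Brown) model, xd = (1 + k1 r^2 + k2 r^4) xu plus the tangential terms in p1 and p2. A hedged sketch of evaluating it and its Jacobian on a normalized point; the coefficient values are arbitrary.

// Hedged usage sketch -- coefficients chosen only to exercise the model.
#include <cstdio>
#include "Geometry/v3d_distortion.h"

int main()
{
    using namespace V3D;
    StdDistortionFunction dist;
    dist.k1 = -0.2;  dist.k2 = 0.05;    // radial terms
    dist.p1 = 0.001; dist.p2 = -0.002;  // tangential terms

    Vector2d xu;                        // normalized, undistorted point (x/z, y/z)
    xu[0] = 0.3; xu[1] = -0.1;
    Vector2d xd = dist(xu);             // distorted point, still in normalized coords
    Matrix2x2d J = dist.derivativeWrtUndistortedPoint(xu);  // 2x2 Jacobian d xd / d xu
    std::printf("xd = (%g, %g), J00 = %g\n", xd[0], xd[1], J[0][0]);
    return 0;
}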

View File

@@ -1,405 +0,0 @@
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Geometry/v3d_metricbundle.h"
#if defined(V3DLIB_ENABLE_SUITESPARSE)
namespace
{
typedef V3D::InlineMatrix<double, 2, 4> Matrix2x4d;
typedef V3D::InlineMatrix<double, 4, 2> Matrix4x2d;
typedef V3D::InlineMatrix<double, 2, 6> Matrix2x6d;
} // end namespace <>
namespace V3D
{
void
MetricBundleOptimizerBase::updateParametersA(VectorArray<double> const& deltaAi)
{
Vector3d T, omega;
Matrix3x3d R0, dR;
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
{
T = _cams[i].getTranslation();
T[0] += deltaAi[i][0];
T[1] += deltaAi[i][1];
T[2] += deltaAi[i][2];
_cams[i].setTranslation(T);
// Create incremental rotation using Rodriguez formula.
R0 = _cams[i].getRotation();
omega[0] = deltaAi[i][3];
omega[1] = deltaAi[i][4];
omega[2] = deltaAi[i][5];
createRotationMatrixRodriguez(omega, dR);
_cams[i].setRotation(dR * R0);
} // end for (i)
} // end MetricBundleOptimizerBase::updateParametersA()
void
MetricBundleOptimizerBase::updateParametersB(VectorArray<double> const& deltaBj)
{
for (int j = _nNonvaryingB; j < _nParametersB; ++j)
{
_Xs[j][0] += deltaBj[j][0];
_Xs[j][1] += deltaBj[j][1];
_Xs[j][2] += deltaBj[j][2];
}
} // end MetricBundleOptimizerBase::updateParametersB()
void
MetricBundleOptimizerBase::poseDerivatives(int i, int j, Vector3d& XX,
Matrix3x6d& d_dRT, Matrix3x3d& d_dX) const
{
XX = _cams[i].transformPointIntoCameraSpace(_Xs[j]);
// See Frank Dellaert's bundle adjustment tutorial.
// d(dR * R0 * X + t)/d omega = -[R0 * X]_x
Matrix3x3d J;
makeCrossProductMatrix(XX - _cams[i].getTranslation(), J);
scaleMatrixIP(-1.0, J);
// Now the transformation from world coords into camera space is xx = Rx + T
// Hence the derivative of x wrt. T is just the identity matrix.
makeIdentityMatrix(d_dRT);
copyMatrixSlice(J, 0, 0, 3, 3, d_dRT, 0, 3);
// The derivative of Rx+T wrt x is just R.
copyMatrix(_cams[i].getRotation(), d_dX);
} // end MetricBundleOptimizerBase::poseDerivatives()
//----------------------------------------------------------------------
void
StdMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
Matrix<double>& Bk,
Matrix<double>& Ck,
int i, int j, int k)
{
Vector3d XX;
Matrix3x6d d_dRT;
Matrix3x3d d_dX;
this->poseDerivatives(i, j, XX, d_dRT, d_dX);
double const f = _cams[i].getFocalLength();
double const ar = _cams[i].getAspectRatio();
Matrix2x3d dp_dX;
double const bx = f / (XX[2] * XX[2]);
double const by = ar * bx;
dp_dX[0][0] = bx * XX[2]; dp_dX[0][1] = 0; dp_dX[0][2] = -bx * XX[0];
dp_dX[1][0] = 0; dp_dX[1][1] = by * XX[2]; dp_dX[1][2] = -by * XX[1];
multiply_A_B(dp_dX, d_dRT, Ak);
multiply_A_B(dp_dX, d_dX, Bk);
} // end StdMetricBundleOptimizer::fillJacobians()
//----------------------------------------------------------------------
void
CommonInternalsMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
Matrix<double>& Bk,
Matrix<double>& Ck,
int i, int j, int k)
{
double const focalLength = _K[0][0];
Vector3d XX;
Matrix3x6d dXX_dRT;
Matrix3x3d dXX_dX;
this->poseDerivatives(i, j, XX, dXX_dRT, dXX_dX);
Vector2d xu; // undistorted image point
xu[0] = XX[0] / XX[2];
xu[1] = XX[1] / XX[2];
Vector2d const xd = _distortion(xu); // distorted image point
Matrix2x2d dp_dxd;
dp_dxd[0][0] = focalLength; dp_dxd[0][1] = 0;
dp_dxd[1][0] = 0; dp_dxd[1][1] = _cachedAspectRatio * focalLength;
{
// First, let's do the derivative wrt the structure and motion parameters.
Matrix2x3d dxu_dXX;
dxu_dXX[0][0] = 1.0f / XX[2]; dxu_dXX[0][1] = 0; dxu_dXX[0][2] = -XX[0]/(XX[2]*XX[2]);
dxu_dXX[1][0] = 0; dxu_dXX[1][1] = 1.0f / XX[2]; dxu_dXX[1][2] = -XX[1]/(XX[2]*XX[2]);
Matrix2x2d dxd_dxu = _distortion.derivativeWrtUndistortedPoint(xu);
Matrix2x2d dp_dxu = dp_dxd * dxd_dxu;
Matrix2x3d dp_dXX = dp_dxu * dxu_dXX;
multiply_A_B(dp_dXX, dXX_dRT, Ak);
multiply_A_B(dp_dXX, dXX_dX, Bk);
} // end scope
switch (_mode)
{
case FULL_BUNDLE_FOCAL_AND_RADIAL_K1:
{
// Focal length.
Ck[0][0] = xd[0];
Ck[1][0] = xd[1];
// For radial, k1 only.
Matrix2x2d dxd_dk1k2 = _distortion.derivativeWrtRadialParameters(xu);
Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
Ck[0][1] = d_dk1k2[0][0];
Ck[1][1] = d_dk1k2[1][0];
break;
}
case FULL_BUNDLE_FOCAL_AND_RADIAL:
{
// Focal length.
Ck[0][0] = xd[0];
Ck[1][0] = xd[1];
// Radial k1 and k2.
Matrix2x2d dxd_dk1k2 = _distortion.derivativeWrtRadialParameters(xu);
Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
copyMatrixSlice(d_dk1k2, 0, 0, 2, 2, Ck, 0, 1);
break;
}
case FULL_BUNDLE_RADIAL_TANGENTIAL:
{
Matrix2x2d dxd_dp1p2 = _distortion.derivativeWrtTangentialParameters(xu);
Matrix2x2d d_dp1p2 = dp_dxd * dxd_dp1p2;
copyMatrixSlice(d_dp1p2, 0, 0, 2, 2, Ck, 0, 5);
// No break here!
}
case FULL_BUNDLE_RADIAL:
{
Matrix2x2d dxd_dk1k2 = _distortion.derivativeWrtRadialParameters(xu);
Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
copyMatrixSlice(d_dk1k2, 0, 0, 2, 2, Ck, 0, 3);
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH_PP:
{
Ck[0][1] = 1; Ck[0][2] = 0;
Ck[1][1] = 0; Ck[1][2] = 1;
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH:
{
Ck[0][0] = xd[0];
Ck[1][0] = xd[1];
}
case FULL_BUNDLE_METRIC:
{
}
} // end switch
} // end CommonInternalsMetricBundleOptimizer::fillJacobians()
void
CommonInternalsMetricBundleOptimizer::updateParametersC(Vector<double> const& deltaC)
{
switch (_mode)
{
case FULL_BUNDLE_FOCAL_AND_RADIAL_K1:
{
_K[0][0] += deltaC[0];
_K[1][1] = _cachedAspectRatio * _K[0][0];
_distortion.k1 += deltaC[1];
break;
}
case FULL_BUNDLE_FOCAL_AND_RADIAL:
{
_K[0][0] += deltaC[0];
_K[1][1] = _cachedAspectRatio * _K[0][0];
_distortion.k1 += deltaC[1];
_distortion.k2 += deltaC[2];
break;
}
case FULL_BUNDLE_RADIAL_TANGENTIAL:
{
_distortion.p1 += deltaC[5];
_distortion.p2 += deltaC[6];
// No break here!
}
case FULL_BUNDLE_RADIAL:
{
_distortion.k1 += deltaC[3];
_distortion.k2 += deltaC[4];
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH_PP:
{
_K[0][2] += deltaC[1];
_K[1][2] += deltaC[2];
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH:
{
_K[0][0] += deltaC[0];
_K[1][1] = _cachedAspectRatio * _K[0][0];
}
case FULL_BUNDLE_METRIC:
{
}
} // end switch
} // end CommonInternalsMetricBundleOptimizer::updateParametersC()
//----------------------------------------------------------------------
void
VaryingInternalsMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
Matrix<double>& Bk,
Matrix<double>& Ck,
int i, int j, int k)
{
Vector3d XX;
Matrix3x6d dXX_dRT;
Matrix3x3d dXX_dX;
this->poseDerivatives(i, j, XX, dXX_dRT, dXX_dX);
Vector2d xu; // undistorted image point
xu[0] = XX[0] / XX[2];
xu[1] = XX[1] / XX[2];
Vector2d const xd = _distortions[i](xu); // distorted image point
double const focalLength = _cams[i].getFocalLength();
double const aspectRatio = _cams[i].getAspectRatio();
Matrix2x2d dp_dxd;
dp_dxd[0][0] = focalLength; dp_dxd[0][1] = 0;
dp_dxd[1][0] = 0; dp_dxd[1][1] = aspectRatio * focalLength;
{
// First, let's do the derivative wrt the structure and motion parameters.
Matrix2x3d dxu_dXX;
dxu_dXX[0][0] = 1.0f / XX[2]; dxu_dXX[0][1] = 0; dxu_dXX[0][2] = -XX[0]/(XX[2]*XX[2]);
dxu_dXX[1][0] = 0; dxu_dXX[1][1] = 1.0f / XX[2]; dxu_dXX[1][2] = -XX[1]/(XX[2]*XX[2]);
Matrix2x2d dxd_dxu = _distortions[i].derivativeWrtUndistortedPoint(xu);
Matrix2x2d dp_dxu = dp_dxd * dxd_dxu;
Matrix2x3d dp_dXX = dp_dxu * dxu_dXX;
Matrix2x6d dp_dRT;
multiply_A_B(dp_dXX, dXX_dRT, dp_dRT);
copyMatrixSlice(dp_dRT, 0, 0, 2, 6, Ak, 0, 0);
multiply_A_B(dp_dXX, dXX_dX, Bk);
} // end scope
switch (_mode)
{
case FULL_BUNDLE_RADIAL_TANGENTIAL:
{
Matrix2x2d dxd_dp1p2 = _distortions[i].derivativeWrtTangentialParameters(xu);
Matrix2x2d d_dp1p2 = dp_dxd * dxd_dp1p2;
copyMatrixSlice(d_dp1p2, 0, 0, 2, 2, Ak, 0, 11);
// No break here!
}
case FULL_BUNDLE_RADIAL:
{
Matrix2x2d dxd_dk1k2 = _distortions[i].derivativeWrtRadialParameters(xu);
Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
copyMatrixSlice(d_dk1k2, 0, 0, 2, 2, Ak, 0, 9);
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH_PP:
{
Ak[0][7] = 1; Ak[0][8] = 0;
Ak[1][7] = 0; Ak[1][8] = 1;
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH:
{
Ak[0][6] = xd[0];
Ak[1][6] = xd[1];
}
case FULL_BUNDLE_METRIC:
{
}
} // end switch
} // end VaryingInternalsMetricBundleOptimizer::fillJacobians()
void
VaryingInternalsMetricBundleOptimizer::updateParametersA(VectorArray<double> const& deltaAi)
{
Vector3d T, omega;
Matrix3x3d R0, dR, K;
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
{
Vector<double> const& deltaA = deltaAi[i];
T = _cams[i].getTranslation();
T[0] += deltaA[0];
T[1] += deltaA[1];
T[2] += deltaA[2];
_cams[i].setTranslation(T);
// Create incremental rotation using Rodriguez formula.
R0 = _cams[i].getRotation();
omega[0] = deltaA[3];
omega[1] = deltaA[4];
omega[2] = deltaA[5];
createRotationMatrixRodriguez(omega, dR);
_cams[i].setRotation(dR * R0);
K = _cams[i].getIntrinsic();
switch (_mode)
{
case FULL_BUNDLE_RADIAL_TANGENTIAL:
{
_distortions[i].p1 += deltaA[11];
_distortions[i].p2 += deltaA[12];
// No break here!
}
case FULL_BUNDLE_RADIAL:
{
_distortions[i].k1 += deltaA[9];
_distortions[i].k2 += deltaA[10];
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH_PP:
{
K[0][2] += deltaA[7];
K[1][2] += deltaA[8];
// No break here!
}
case FULL_BUNDLE_FOCAL_LENGTH:
{
double const ar = K[1][1] / K[0][0];
K[0][0] += deltaA[6];
K[1][1] = ar * K[0][0];
}
case FULL_BUNDLE_METRIC:
{
}
} // end switch
_cams[i].setIntrinsic(K);
} // end for (i)
} // end VaryingInternalsMetricBundleOptimizer::updateParametersA()
} // end namespace V3D
#endif // defined(V3DLIB_ENABLE_SUITESPARSE)

View File

@@ -1,352 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_METRICBUNDLE_H
#define V3D_METRICBUNDLE_H
# if defined(V3DLIB_ENABLE_SUITESPARSE)
#include "Math/v3d_optimization.h"
#include "Math/v3d_linear.h"
#include "Math/v3d_linear_utils.h"
#include "Geometry/v3d_cameramatrix.h"
#include "Geometry/v3d_distortion.h"
namespace V3D
{
// This structure provides some helper functions common to all metric BAs
struct MetricBundleOptimizerBase : public SparseLevenbergOptimizer
{
typedef SparseLevenbergOptimizer Base;
MetricBundleOptimizerBase(double inlierThreshold,
vector<CameraMatrix>& cams,
vector<Vector3d >& Xs,
vector<Vector2d > const& measurements,
vector<int> const& corrspondingView,
vector<int> const& corrspondingPoint,
int nAddParamsA, int nParamsC)
: SparseLevenbergOptimizer(2, cams.size(), 6+nAddParamsA, Xs.size(), 3, nParamsC,
corrspondingView, corrspondingPoint),
_cams(cams), _Xs(Xs), _measurements(measurements),
_savedTranslations(cams.size()), _savedRotations(cams.size()),
_savedXs(Xs.size()),
_inlierThreshold(inlierThreshold), _cachedParamLength(0.0)
{
// Since we assume that BA does not alter the inputs too much,
// we compute the overall length of the parameter vector in advance
// and return that value as the result of getParameterLength().
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
{
_cachedParamLength += sqrNorm_L2(_cams[i].getTranslation());
_cachedParamLength += 3.0; // Assume eye(3) for R.
}
for (int j = _nNonvaryingB; j < _nParametersB; ++j)
_cachedParamLength += sqrNorm_L2(_Xs[j]);
_cachedParamLength = sqrt(_cachedParamLength);
}
// Huber robust cost function.
virtual void fillWeights(VectorArray<double> const& residual, Vector<double>& w)
{
for (unsigned int k = 0; k < w.size(); ++k)
{
Vector<double> const& r = residual[k];
double const e = norm_L2(r);
w[k] = (e < _inlierThreshold) ? 1.0 : sqrt(_inlierThreshold / e);
} // end for (k)
}
virtual double getParameterLength() const
{
return _cachedParamLength;
}
virtual void updateParametersA(VectorArray<double> const& deltaAi);
virtual void updateParametersB(VectorArray<double> const& deltaBj);
virtual void updateParametersC(Vector<double> const& deltaC)
{
(void)deltaC;
}
virtual void saveAllParameters()
{
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
{
_savedTranslations[i] = _cams[i].getTranslation();
_savedRotations[i] = _cams[i].getRotation();
}
_savedXs = _Xs;
}
virtual void restoreAllParameters()
{
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
{
_cams[i].setTranslation(_savedTranslations[i]);
_cams[i].setRotation(_savedRotations[i]);
}
_Xs = _savedXs;
}
protected:
typedef InlineMatrix<double, 3, 6> Matrix3x6d;
void poseDerivatives(int i, int j, Vector3d& XX,
Matrix3x6d& d_dRT, Matrix3x3d& d_dX) const;
vector<CameraMatrix>& _cams;
vector<Vector3d>& _Xs;
vector<Vector2d> const& _measurements;
vector<Vector3d> _savedTranslations;
vector<Matrix3x3d> _savedRotations;
vector<Vector3d> _savedXs;
double const _inlierThreshold;
double _cachedParamLength;
}; // end struct MetricBundleOptimizerBase
struct StdMetricBundleOptimizer : public MetricBundleOptimizerBase
{
typedef MetricBundleOptimizerBase Base;
StdMetricBundleOptimizer(double inlierThreshold,
vector<CameraMatrix>& cams,
vector<Vector3d >& Xs,
vector<Vector2d > const& measurements,
vector<int> const& corrspondingView,
vector<int> const& corrspondingPoint)
: MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
corrspondingView, corrspondingPoint, 0, 0)
{ }
virtual void evalResidual(VectorArray<double>& e)
{
for (unsigned int k = 0; k < e.count(); ++k)
{
int const i = _correspondingParamA[k];
int const j = _correspondingParamB[k];
Vector2d const q = _cams[i].projectPoint(_Xs[j]);
e[k][0] = q[0] - _measurements[k][0];
e[k][1] = q[1] - _measurements[k][1];
}
}
virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
int i, int j, int k);
}; // end struct StdMetricBundleOptimizer
//----------------------------------------------------------------------
enum
{
FULL_BUNDLE_METRIC = 0,
FULL_BUNDLE_FOCAL_LENGTH = 1, // f
FULL_BUNDLE_FOCAL_LENGTH_PP = 2, // f, cx, cy
FULL_BUNDLE_RADIAL = 3, // f, cx, cy, k1, k2
FULL_BUNDLE_RADIAL_TANGENTIAL = 4, // f, cx, cy, k1, k2, p1, p2
FULL_BUNDLE_FOCAL_AND_RADIAL_K1 = 5, // f, k1
FULL_BUNDLE_FOCAL_AND_RADIAL = 6, // f, k1, k2
};
struct CommonInternalsMetricBundleOptimizer : public MetricBundleOptimizerBase
{
static int globalParamDimensionFromMode(int mode)
{
switch (mode)
{
case FULL_BUNDLE_METRIC: return 0;
case FULL_BUNDLE_FOCAL_LENGTH: return 1;
case FULL_BUNDLE_FOCAL_LENGTH_PP: return 3;
case FULL_BUNDLE_RADIAL: return 5;
case FULL_BUNDLE_RADIAL_TANGENTIAL: return 7;
case FULL_BUNDLE_FOCAL_AND_RADIAL_K1: return 2;
case FULL_BUNDLE_FOCAL_AND_RADIAL: return 3;
}
return 0;
}
typedef MetricBundleOptimizerBase Base;
CommonInternalsMetricBundleOptimizer(int mode,
double inlierThreshold,
Matrix3x3d& K,
StdDistortionFunction& distortion,
vector<CameraMatrix>& cams,
vector<Vector3d >& Xs,
vector<Vector2d > const& measurements,
vector<int> const& corrspondingView,
vector<int> const& corrspondingPoint)
: MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
corrspondingView, corrspondingPoint,
0, globalParamDimensionFromMode(mode)),
_mode(mode), _K(K), _distortion(distortion)
{
_cachedAspectRatio = K[1][1] / K[0][0];
}
Vector2d projectPoint(Vector3d const& X, int i) const
{
Vector3d const XX = _cams[i].transformPointIntoCameraSpace(X);
Vector2d p;
p[0] = XX[0] / XX[2];
p[1] = XX[1] / XX[2];
p = _distortion(p);
Vector2d res;
res[0] = _K[0][0] * p[0] + _K[0][1] * p[1] + _K[0][2];
res[1] = _K[1][1] * p[1] + _K[1][2];
return res;
}
virtual void evalResidual(VectorArray<double>& e)
{
for (unsigned int k = 0; k < e.count(); ++k)
{
int const i = _correspondingParamA[k];
int const j = _correspondingParamB[k];
Vector2d const q = this->projectPoint(_Xs[j], i);
e[k][0] = q[0] - _measurements[k][0];
e[k][1] = q[1] - _measurements[k][1];
}
}
virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
int i, int j, int k);
virtual void updateParametersC(Vector<double> const& deltaC);
virtual void saveAllParameters()
{
Base::saveAllParameters();
_savedK = _K;
_savedDistortion = _distortion;
}
virtual void restoreAllParameters()
{
Base::restoreAllParameters();
_K = _savedK;
_distortion = _savedDistortion;
}
protected:
int _mode;
Matrix3x3d& _K;
StdDistortionFunction& _distortion;
Matrix3x3d _savedK;
StdDistortionFunction _savedDistortion;
double _cachedAspectRatio;
}; // end struct CommonInternalsMetricBundleOptimizer
//----------------------------------------------------------------------
struct VaryingInternalsMetricBundleOptimizer : public MetricBundleOptimizerBase
{
static int extParamDimensionFromMode(int mode)
{
switch (mode)
{
case FULL_BUNDLE_METRIC: return 0;
case FULL_BUNDLE_FOCAL_LENGTH: return 1;
case FULL_BUNDLE_FOCAL_LENGTH_PP: return 3;
case FULL_BUNDLE_RADIAL: return 5;
case FULL_BUNDLE_RADIAL_TANGENTIAL: return 7;
case FULL_BUNDLE_FOCAL_AND_RADIAL_K1: return 2;
case FULL_BUNDLE_FOCAL_AND_RADIAL: return 3;
}
return 0;
}
typedef MetricBundleOptimizerBase Base;
VaryingInternalsMetricBundleOptimizer(int mode,
double inlierThreshold,
std::vector<StdDistortionFunction>& distortions,
vector<CameraMatrix>& cams,
vector<Vector3d >& Xs,
vector<Vector2d > const& measurements,
vector<int> const& corrspondingView,
vector<int> const& corrspondingPoint)
: MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
corrspondingView, corrspondingPoint,
extParamDimensionFromMode(mode), 0),
_mode(mode), _distortions(distortions),
_savedKs(cams.size()), _savedDistortions(cams.size())
{ }
Vector2d projectPoint(Vector3d const& X, int i) const
{
return _cams[i].projectPoint(_distortions[i], X);
}
virtual void evalResidual(VectorArray<double>& e)
{
for (unsigned int k = 0; k < e.count(); ++k)
{
int const i = _correspondingParamA[k];
int const j = _correspondingParamB[k];
Vector2d const q = this->projectPoint(_Xs[j], i);
e[k][0] = q[0] - _measurements[k][0];
e[k][1] = q[1] - _measurements[k][1];
}
}
virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
int i, int j, int k);
virtual void updateParametersA(VectorArray<double> const& deltaAi);
virtual void saveAllParameters()
{
Base::saveAllParameters();
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
_savedKs[i] = _cams[i].getIntrinsic();
std::copy(_distortions.begin(), _distortions.end(), _savedDistortions.begin());
}
virtual void restoreAllParameters()
{
Base::restoreAllParameters();
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
_cams[i].setIntrinsic(_savedKs[i]);
std::copy(_savedDistortions.begin(), _savedDistortions.end(), _distortions.begin());
}
protected:
int _mode;
std::vector<StdDistortionFunction>& _distortions;
std::vector<Matrix3x3d> _savedKs;
std::vector<StdDistortionFunction> _savedDistortions;
}; // end struct VaryingInternalsMetricBundleOptimizer
} // end namespace V3D
# endif
#endif
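
For orientation on the input layout: one CameraMatrix per view, one Vector3d per track point, one Vector2d measurement per observation, with the two correspondence vectors mapping each measurement to its (view, point) pair. A hedged, construction-only sketch follows; the Levenberg-Marquardt driver lives in Math/v3d_optimization.h (outside this hunk), and the header is compiled only when V3DLIB_ENABLE_SUITESPARSE is defined, so treat the mode and threshold as illustrative.

// Hedged sketch: requires V3DLIB_ENABLE_SUITESPARSE; mode and threshold are illustrative.
#include <vector>
#include "Geometry/v3d_metricbundle.h"

using namespace V3D;

void setupBundle(std::vector<CameraMatrix>& cams,
                 std::vector<Vector3d>& Xs,
                 std::vector<Vector2d> const& measurements,
                 std::vector<int> const& correspondingView,
                 std::vector<int> const& correspondingPoint,
                 Matrix3x3d& K,
                 StdDistortionFunction& distortion)
{
    double const inlierThreshold = 2.0;   // Huber threshold used by fillWeights()
    CommonInternalsMetricBundleOptimizer opt(FULL_BUNDLE_RADIAL, inlierThreshold,
                                             K, distortion, cams, Xs, measurements,
                                             correspondingView, correspondingPoint);
    // The LM driver from v3d_optimization.h would be run on 'opt' here; on success
    // K, distortion, cams and Xs are updated in place via the updateParameters*() hooks.
    (void) opt;
}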

View File

@@ -1,923 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_LINEAR_H
#define V3D_LINEAR_H
#include <cassert>
#include <algorithm>
#include <vector>
#include <cmath>
namespace V3D
{
using namespace std;
//! Unboxed vector type
template <typename Elem, int Size>
struct InlineVectorBase
{
typedef Elem value_type;
typedef Elem element_type;
typedef Elem const * const_iterator;
typedef Elem * iterator;
static unsigned int size() { return Size; }
Elem& operator[](unsigned int i) { return _vec[i]; }
Elem operator[](unsigned int i) const { return _vec[i]; }
Elem& operator()(unsigned int i) { return _vec[i-1]; }
Elem operator()(unsigned int i) const { return _vec[i-1]; }
const_iterator begin() const { return _vec; }
iterator begin() { return _vec; }
const_iterator end() const { return _vec + Size; }
iterator end() { return _vec + Size; }
void newsize(unsigned int sz) const
{
assert(sz == Size);
}
protected:
Elem _vec[Size];
};
//! Boxed (heap allocated) vector.
template <typename Elem>
struct VectorBase
{
typedef Elem value_type;
typedef Elem element_type;
typedef Elem const * const_iterator;
typedef Elem * iterator;
VectorBase()
: _size(0), _ownsVec(true), _vec(0)
{ }
VectorBase(unsigned int size)
: _size(size), _ownsVec(true), _vec(0)
{
if (size > 0) _vec = new Elem[size];
}
VectorBase(unsigned int size, Elem * values)
: _size(size), _ownsVec(false), _vec(values)
{ }
VectorBase(VectorBase<Elem> const& a)
: _size(0), _ownsVec(true), _vec(0)
{
_size = a._size;
if (_size == 0) return;
_vec = new Elem[_size];
std::copy(a._vec, a._vec + _size, _vec);
}
~VectorBase() { if (_ownsVec && _vec != 0) delete [] _vec; }
VectorBase& operator=(VectorBase<Elem> const& a)
{
if (this == &a) return *this;
this->newsize(a._size);
std::copy(a._vec, a._vec + _size, _vec);
return *this;
}
unsigned int size() const { return _size; }
VectorBase<Elem>& newsize(unsigned int sz)
{
if (sz == _size) return *this;
assert(_ownsVec);
__destroy();
_size = sz;
if (_size > 0) _vec = new Elem[_size];
return *this;
}
Elem& operator[](unsigned int i) { return _vec[i]; }
Elem operator[](unsigned int i) const { return _vec[i]; }
Elem& operator()(unsigned int i) { return _vec[i-1]; }
Elem operator()(unsigned int i) const { return _vec[i-1]; }
const_iterator begin() const { return _vec; }
iterator begin() { return _vec; }
const_iterator end() const { return _vec + _size; }
iterator end() { return _vec + _size; }
protected:
void __destroy()
{
assert(_ownsVec);
if (_vec != 0) delete [] _vec;
_size = 0;
_vec = 0;
}
unsigned int _size;
bool _ownsVec;
Elem * _vec;
};
template <typename Elem, int Rows, int Cols>
struct InlineMatrixBase
{
typedef Elem value_type;
typedef Elem element_type;
typedef Elem * iterator;
typedef Elem const * const_iterator;
static unsigned int num_rows() { return Rows; }
static unsigned int num_cols() { return Cols; }
Elem * operator[](unsigned int row) { return _m[row]; }
Elem const * operator[](unsigned int row) const { return _m[row]; }
Elem& operator()(unsigned int row, unsigned int col) { return _m[row-1][col-1]; }
Elem operator()(unsigned int row, unsigned int col) const { return _m[row-1][col-1]; }
template <typename Vec>
void getRowSlice(unsigned int row, unsigned int first, unsigned int last, Vec& dst) const
{
for (unsigned int c = first; c < last; ++c) dst[c-first] = _m[row][c];
}
template <typename Vec>
void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
{
for (unsigned int r = 0; r < len; ++r) dst[r] = _m[r+first][col];
}
void newsize(unsigned int rows, unsigned int cols) const
{
assert(rows == Rows && cols == Cols);
}
const_iterator begin() const { return &_m[0][0]; }
iterator begin() { return &_m[0][0]; }
const_iterator end() const { return &_m[0][0] + Rows*Cols; }
iterator end() { return &_m[0][0] + Rows*Cols; }
protected:
Elem _m[Rows][Cols];
};
template <typename Elem>
struct MatrixBase
{
typedef Elem value_type;
typedef Elem element_type;
typedef Elem const * const_iterator;
typedef Elem * iterator;
MatrixBase()
: _rows(0), _cols(0), _ownsData(true), _m(0)
{ }
MatrixBase(unsigned int rows, unsigned int cols)
: _rows(rows), _cols(cols), _ownsData(true), _m(0)
{
if (_rows * _cols == 0) return;
_m = new Elem[rows*cols];
}
MatrixBase(unsigned int rows, unsigned int cols, Elem * values)
: _rows(rows), _cols(cols), _ownsData(false), _m(values)
{ }
MatrixBase(MatrixBase<Elem> const& a)
: _ownsData(true), _m(0)
{
_rows = a._rows; _cols = a._cols;
if (_rows * _cols == 0) return;
_m = new Elem[_rows*_cols];
std::copy(a._m, a._m+_rows*_cols, _m);
}
~MatrixBase()
{
if (_ownsData && _m != 0) delete [] _m;
}
MatrixBase& operator=(MatrixBase<Elem> const& a)
{
if (this == &a) return *this;
this->newsize(a.num_rows(), a.num_cols());
std::copy(a._m, a._m+_rows*_cols, _m);
return *this;
}
void newsize(unsigned int rows, unsigned int cols)
{
if (rows == _rows && cols == _cols) return;
assert(_ownsData);
__destroy();
_rows = rows;
_cols = cols;
if (_rows * _cols == 0) return;
_m = new Elem[rows*cols];
}
unsigned int num_rows() const { return _rows; }
unsigned int num_cols() const { return _cols; }
Elem * operator[](unsigned int row) { return _m + row*_cols; }
Elem const * operator[](unsigned int row) const { return _m + row*_cols; }
Elem& operator()(unsigned int row, unsigned int col) { return _m[(row-1)*_cols + col-1]; }
Elem operator()(unsigned int row, unsigned int col) const { return _m[(row-1)*_cols + col-1]; }
const_iterator begin() const { return _m; }
iterator begin() { return _m; }
const_iterator end() const { return _m + _rows*_cols; }
iterator end() { return _m + _rows*_cols; }
template <typename Vec>
void getRowSlice(unsigned int row, unsigned int first, unsigned int last, Vec& dst) const
{
Elem const * v = (*this)[row];
for (unsigned int c = first; c < last; ++c) dst[c-first] = v[c];
}
template <typename Vec>
void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
{
for (unsigned int r = 0; r < len; ++r) dst[r] = _m[r+first][col];
}
protected:
void __destroy()
{
assert(_ownsData);
if (_m != 0) delete [] _m;
_m = 0;
_rows = _cols = 0;
}
unsigned int _rows, _cols;
bool _ownsData;
Elem * _m;
};
template <typename T>
struct CCS_Matrix
{
CCS_Matrix()
: _rows(0), _cols(0)
{ }
CCS_Matrix(int const rows, int const cols, vector<pair<int, int> > const& nonZeros)
: _rows(rows), _cols(cols)
{
this->initialize(nonZeros);
}
CCS_Matrix(CCS_Matrix const& b)
: _rows(b._rows), _cols(b._cols),
_colStarts(b._colStarts), _rowIdxs(b._rowIdxs), _destIdxs(b._destIdxs), _values(b._values)
{ }
CCS_Matrix& operator=(CCS_Matrix const& b)
{
if (this == &b) return *this;
_rows = b._rows;
_cols = b._cols;
_colStarts = b._colStarts;
_rowIdxs = b._rowIdxs;
_destIdxs = b._destIdxs;
_values = b._values;
return *this;
}
void create(int const rows, int const cols, vector<pair<int, int> > const& nonZeros)
{
_rows = rows;
_cols = cols;
this->initialize(nonZeros);
}
unsigned int num_rows() const { return _rows; }
unsigned int num_cols() const { return _cols; }
int getNonzeroCount() const { return _values.size(); }
T const * getValues() const { return &_values[0]; }
T * getValues() { return &_values[0]; }
int const * getDestIndices() const { return &_destIdxs[0]; }
int const * getColumnStarts() const { return &_colStarts[0]; }
int const * getRowIndices() const { return &_rowIdxs[0]; }
void getRowRange(unsigned int col, unsigned int& firstRow, unsigned int& lastRow) const
{
firstRow = _rowIdxs[_colStarts[col]];
lastRow = _rowIdxs[_colStarts[col+1]-1]+1;
}
template <typename Vec>
void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
{
unsigned int const last = first + len;
for (int r = 0; r < len; ++r) dst[r] = 0; // Fill vector with zeros
int const colStart = _colStarts[col];
int const colEnd = _colStarts[col+1];
int i = colStart;
int r;
// Skip rows less than the given start row
while (i < colEnd && (r = _rowIdxs[i]) < first) ++i;
// Copy elements until the final row
while (i < colEnd && (r = _rowIdxs[i]) < last)
{
dst[r-first] = _values[i];
++i;
}
} // end getColumnSlice()
int getColumnNonzeroCount(unsigned int col) const
{
int const colStart = _colStarts[col];
int const colEnd = _colStarts[col+1];
return colEnd - colStart;
}
template <typename VecA, typename VecB>
void getSparseColumn(unsigned int col, VecA& rows, VecB& values) const
{
int const colStart = _colStarts[col];
int const colEnd = _colStarts[col+1];
int const nnz = colEnd - colStart;
for (int i = 0; i < nnz; ++i)
{
rows[i] = _rowIdxs[colStart + i];
values[i] = _values[colStart + i];
}
}
protected:
struct NonzeroInfo
{
int row, col, serial;
// Sort wrt the column first
bool operator<(NonzeroInfo const& rhs) const
{
if (col < rhs.col) return true;
if (col > rhs.col) return false;
return row < rhs.row;
}
};
void initialize(std::vector<std::pair<int, int> > const& nonZeros)
{
using namespace std;
int const nnz = nonZeros.size();
_colStarts.resize(_cols + 1);
_rowIdxs.resize(nnz);
vector<NonzeroInfo> nz(nnz);
for (int k = 0; k < nnz; ++k)
{
nz[k].row = nonZeros[k].first;
nz[k].col = nonZeros[k].second;
nz[k].serial = k;
}
// Sort in column major order
std::sort(nz.begin(), nz.end());
for (size_t k = 0; k < nnz; ++k) _rowIdxs[k] = nz[k].row;
int curCol = -1;
for (int k = 0; k < nnz; ++k)
{
NonzeroInfo const& el = nz[k];
if (el.col != curCol)
{
// Update empty cols between
for (int c = curCol+1; c < el.col; ++c) _colStarts[c] = k;
curCol = el.col;
_colStarts[curCol] = k;
} // end if
} // end for (k)
// Update remaining columns
for (int c = curCol+1; c <= _cols; ++c) _colStarts[c] = nnz;
_destIdxs.resize(nnz);
for (int k = 0; k < nnz; ++k) _destIdxs[nz[k].serial] = k;
_values.resize(nnz);
} // end initialize()
int _rows, _cols;
std::vector<int> _colStarts;
std::vector<int> _rowIdxs;
std::vector<int> _destIdxs;
std::vector<T> _values;
}; // end struct CCS_Matrix
//----------------------------------------------------------------------
template <typename Vec, typename Elem>
inline void
fillVector(Vec& v, Elem val)
{
// We do not use std::fill since we rely only on size() and operator[] member functions.
for (unsigned int i = 0; i < v.size(); ++i) v[i] = val;
}
template <typename Vec>
inline void
makeZeroVector(Vec& v)
{
fillVector(v, 0);
}
template <typename VecA, typename VecB>
inline void
copyVector(VecA const& src, VecB& dst)
{
assert(src.size() == dst.size());
// We do not use std::copy since we rely only on size() and operator[] member functions.
for (unsigned int i = 0; i < src.size(); ++i) dst[i] = src[i];
}
template <typename VecA, typename VecB>
inline void
copyVectorSlice(VecA const& src, unsigned int srcStart, unsigned int srcLen,
VecB& dst, unsigned int dstStart)
{
unsigned int const end = std::min(srcStart + srcLen, src.size());
unsigned int const sz = dst.size();
unsigned int i0, i1;
for (i0 = srcStart, i1 = dstStart; i0 < end && i1 < sz; ++i0, ++i1) dst[i1] = src[i0];
}
template <typename Vec>
inline typename Vec::value_type
norm_L1(Vec const& v)
{
typename Vec::value_type res(0);
for (unsigned int i = 0; i < v.size(); ++i) res += fabs(v[i]);
return res;
}
template <typename Vec>
inline typename Vec::value_type
norm_Linf(Vec const& v)
{
typename Vec::value_type res(0);
for (unsigned int i = 0; i < v.size(); ++i) res = std::max(res, fabs(v[i]));
return res;
}
template <typename Vec>
inline typename Vec::value_type
norm_L2(Vec const& v)
{
typename Vec::value_type res(0);
for (unsigned int i = 0; i < v.size(); ++i) res += v[i]*v[i];
return sqrt((double)res);
}
template <typename Vec>
inline typename Vec::value_type
sqrNorm_L2(Vec const& v)
{
typename Vec::value_type res(0);
for (unsigned int i = 0; i < v.size(); ++i) res += v[i]*v[i];
return res;
}
template <typename Vec>
inline void
normalizeVector(Vec& v)
{
typename Vec::value_type norm(norm_L2(v));
for (unsigned int i = 0; i < v.size(); ++i) v[i] /= norm;
}
template<typename VecA, typename VecB>
inline typename VecA::value_type
innerProduct(VecA const& a, VecB const& b)
{
assert(a.size() == b.size());
typename VecA::value_type res(0);
for (unsigned int i = 0; i < a.size(); ++i) res += a[i] * b[i];
return res;
}
template <typename Elem, typename VecA, typename VecB>
inline void
scaleVector(Elem s, VecA const& v, VecB& dst)
{
for (unsigned int i = 0; i < v.size(); ++i) dst[i] = s * v[i];
}
template <typename Elem, typename Vec>
inline void
scaleVectorIP(Elem s, Vec& v)
{
typedef typename Vec::value_type Elem2;
for (unsigned int i = 0; i < v.size(); ++i)
v[i] = (Elem2)(v[i] * s);
}
template <typename VecA, typename VecB, typename VecC>
inline void
makeCrossProductVector(VecA const& v, VecB const& w, VecC& dst)
{
assert(v.size() == 3);
assert(w.size() == 3);
assert(dst.size() == 3);
dst[0] = v[1]*w[2] - v[2]*w[1];
dst[1] = v[2]*w[0] - v[0]*w[2];
dst[2] = v[0]*w[1] - v[1]*w[0];
}
template <typename VecA, typename VecB, typename VecC>
inline void
addVectors(VecA const& v, VecB const& w, VecC& dst)
{
assert(v.size() == w.size());
assert(v.size() == dst.size());
for (unsigned int i = 0; i < v.size(); ++i) dst[i] = v[i] + w[i];
}
template <typename VecA, typename VecB, typename VecC>
inline void
subtractVectors(VecA const& v, VecB const& w, VecC& dst)
{
assert(v.size() == w.size());
assert(v.size() == dst.size());
for (unsigned int i = 0; i < v.size(); ++i) dst[i] = v[i] - w[i];
}
template <typename MatA, typename MatB>
inline void
copyMatrix(MatA const& src, MatB& dst)
{
unsigned int const rows = src.num_rows();
unsigned int const cols = src.num_cols();
assert(dst.num_rows() == rows);
assert(dst.num_cols() == cols);
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) dst[r][c] = src[r][c];
}
template <typename MatA, typename MatB>
inline void
copyMatrixSlice(MatA const& src, unsigned int rowStart, unsigned int colStart, unsigned int rowLen, unsigned int colLen,
MatB& dst, unsigned int dstRow, unsigned int dstCol)
{
unsigned int const rows = dst.num_rows();
unsigned int const cols = dst.num_cols();
unsigned int const rowEnd = std::min(rowStart + rowLen, src.num_rows());
unsigned int const colEnd = std::min(colStart + colLen, src.num_cols());
unsigned int c0, c1, r0, r1;
for (c0 = colStart, c1 = dstCol; c0 < colEnd && c1 < cols; ++c0, ++c1)
for (r0 = rowStart, r1 = dstRow; r0 < rowEnd && r1 < rows; ++r0, ++r1)
dst[r1][c1] = src[r0][c0];
}
template <typename MatA, typename MatB>
inline void
makeTransposedMatrix(MatA const& src, MatB& dst)
{
unsigned int const rows = src.num_rows();
unsigned int const cols = src.num_cols();
assert(dst.num_cols() == rows);
assert(dst.num_rows() == cols);
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) dst[c][r] = src[r][c];
}
template <typename Mat>
inline void
fillMatrix(Mat& m, typename Mat::value_type val)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) m[r][c] = val;
}
template <typename Mat>
inline void
makeZeroMatrix(Mat& m)
{
fillMatrix(m, 0);
}
template <typename Mat>
inline void
makeIdentityMatrix(Mat& m)
{
makeZeroMatrix(m);
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
unsigned int n = std::min(rows, cols);
for (unsigned int i = 0; i < n; ++i)
m[i][i] = 1;
}
template <typename Mat, typename Vec>
inline void
makeCrossProductMatrix(Vec const& v, Mat& m)
{
assert(v.size() == 3);
assert(m.num_rows() == 3);
assert(m.num_cols() == 3);
m[0][0] = 0; m[0][1] = -v[2]; m[0][2] = v[1];
m[1][0] = v[2]; m[1][1] = 0; m[1][2] = -v[0];
m[2][0] = -v[1]; m[2][1] = v[0]; m[2][2] = 0;
}
template <typename Mat, typename Vec>
inline void
makeOuterProductMatrix(Vec const& v, Mat& m)
{
assert(m.num_cols() == m.num_rows());
assert(v.size() == m.num_cols());
unsigned const sz = v.size();
for (unsigned r = 0; r < sz; ++r)
for (unsigned c = 0; c < sz; ++c) m[r][c] = v[r]*v[c];
}
template <typename Mat, typename VecA, typename VecB>
inline void
makeOuterProductMatrix(VecA const& u, VecB const& v, Mat& m)
{
assert(m.num_cols() == m.num_rows());
assert(u.size() == m.num_cols());
assert(v.size() == m.num_cols());
unsigned const sz = u.size();
for (unsigned r = 0; r < sz; ++r)
for (unsigned c = 0; c < sz; ++c) m[r][c] = u[r]*v[c];
}
template <typename MatA, typename MatB, typename MatC>
void addMatrices(MatA const& a, MatB const& b, MatC& dst)
{
assert(a.num_cols() == b.num_cols());
assert(a.num_rows() == b.num_rows());
assert(dst.num_cols() == a.num_cols());
assert(dst.num_rows() == a.num_rows());
unsigned int const rows = a.num_rows();
unsigned int const cols = a.num_cols();
for (unsigned r = 0; r < rows; ++r)
for (unsigned c = 0; c < cols; ++c) dst[r][c] = a[r][c] + b[r][c];
}
template <typename MatA, typename MatB>
void addMatricesIP(MatA const& a, MatB& dst)
{
assert(dst.num_cols() == a.num_cols());
assert(dst.num_rows() == a.num_rows());
unsigned int const rows = a.num_rows();
unsigned int const cols = a.num_cols();
for (unsigned r = 0; r < rows; ++r)
for (unsigned c = 0; c < cols; ++c) dst[r][c] += a[r][c];
}
template <typename MatA, typename MatB, typename MatC>
void subtractMatrices(MatA const& a, MatB const& b, MatC& dst)
{
assert(a.num_cols() == b.num_cols());
assert(a.num_rows() == b.num_rows());
assert(dst.num_cols() == a.num_cols());
assert(dst.num_rows() == a.num_rows());
unsigned int const rows = a.num_rows();
unsigned int const cols = a.num_cols();
for (unsigned r = 0; r < rows; ++r)
for (unsigned c = 0; c < cols; ++c) dst[r][c] = a[r][c] - b[r][c];
}
template <typename MatA, typename Elem, typename MatB>
inline void
makeScaledMatrix(MatA const& m, Elem scale, MatB& dst)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) dst[r][c] = m[r][c] * scale;
}
template <typename Mat, typename Elem>
inline void
scaleMatrixIP(Elem scale, Mat& m)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) m[r][c] *= scale;
}
template <typename Mat, typename VecA, typename VecB>
inline void
multiply_A_v(Mat const& m, VecA const& in, VecB& dst)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
assert(in.size() == cols);
assert(dst.size() == rows);
makeZeroVector(dst);
for (unsigned int r = 0; r < rows; ++r)
for (unsigned int c = 0; c < cols; ++c) dst[r] += m[r][c] * in[c];
}
template <typename Mat, typename VecA, typename VecB>
inline void
multiply_A_v_projective(Mat const& m, VecA const& in, VecB& dst)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
assert(in.size() == cols-1);
assert(dst.size() == rows-1);
typename VecB::value_type w = m(rows-1, cols-1);
unsigned int r, i;
for (i = 0; i < cols-1; ++i) w += m(rows-1, i) * in[i];
for (r = 0; r < rows-1; ++r) dst[r] = m(r, cols-1);
for (r = 0; r < rows-1; ++r)
for (unsigned int c = 0; c < cols-1; ++c) dst[r] += m[r][c] * in[c];
for (i = 0; i < rows-1; ++i) dst[i] /= w;
}
template <typename Mat, typename VecA, typename VecB>
inline void
multiply_A_v_affine(Mat const& m, VecA const& in, VecB& dst)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
assert(in.size() == cols-1);
assert(dst.size() == rows);
unsigned int r;
for (r = 0; r < rows; ++r) dst[r] = m(r, cols-1);
for (r = 0; r < rows; ++r)
for (unsigned int c = 0; c < cols-1; ++c) dst[r] += m[r][c] * in[c];
}
template <typename Mat, typename VecA, typename VecB>
inline void
multiply_At_v(Mat const& m, VecA const& in, VecB& dst)
{
unsigned int const rows = m.num_rows();
unsigned int const cols = m.num_cols();
assert(in.size() == rows);
assert(dst.size() == cols);
makeZeroVector(dst);
for (unsigned int c = 0; c < cols; ++c)
for (unsigned int r = 0; r < rows; ++r) dst[c] += m[r][c] * in[r];
}
template <typename MatA, typename MatB>
inline void
multiply_At_A(MatA const& a, MatB& dst)
{
assert(dst.num_rows() == a.num_cols());
assert(dst.num_cols() == a.num_cols());
typedef typename MatB::value_type Elem;
Elem accum;
for (unsigned int r = 0; r < a.num_cols(); ++r)
for (unsigned int c = 0; c < a.num_cols(); ++c)
{
accum = 0;
for (unsigned int k = 0; k < a.num_rows(); ++k) accum += a[k][r] * a[k][c];
dst[r][c] = accum;
}
}
template <typename MatA, typename MatB, typename MatC>
inline void
multiply_A_B(MatA const& a, MatB const& b, MatC& dst)
{
assert(a.num_cols() == b.num_rows());
assert(dst.num_rows() == a.num_rows());
assert(dst.num_cols() == b.num_cols());
typedef typename MatC::value_type Elem;
Elem accum;
for (unsigned int r = 0; r < a.num_rows(); ++r)
for (unsigned int c = 0; c < b.num_cols(); ++c)
{
accum = 0;
for (unsigned int k = 0; k < a.num_cols(); ++k) accum += a[r][k] * b[k][c];
dst[r][c] = accum;
}
}
template <typename MatA, typename MatB, typename MatC>
inline void
multiply_At_B(MatA const& a, MatB const& b, MatC& dst)
{
assert(a.num_rows() == b.num_rows());
assert(dst.num_rows() == a.num_cols());
assert(dst.num_cols() == b.num_cols());
typedef typename MatC::value_type Elem;
Elem accum;
for (unsigned int r = 0; r < a.num_cols(); ++r)
for (unsigned int c = 0; c < b.num_cols(); ++c)
{
accum = 0;
for (unsigned int k = 0; k < a.num_rows(); ++k) accum += a[k][r] * b[k][c];
dst[r][c] = accum;
}
}
template <typename MatA, typename MatB, typename MatC>
inline void
multiply_A_Bt(MatA const& a, MatB const& b, MatC& dst)
{
assert(a.num_cols() == b.num_cols());
assert(dst.num_rows() == a.num_rows());
assert(dst.num_cols() == b.num_rows());
typedef typename MatC::value_type Elem;
Elem accum;
for (unsigned int r = 0; r < a.num_rows(); ++r)
for (unsigned int c = 0; c < b.num_rows(); ++c)
{
accum = 0;
for (unsigned int k = 0; k < a.num_cols(); ++k) accum += a[r][k] * b[c][k];
dst[r][c] = accum;
}
}
template <typename Mat>
inline void
transposeMatrixIP(Mat& a)
{
assert(a.num_rows() == a.num_cols());
for (unsigned int r = 0; r < a.num_rows(); ++r)
for (unsigned int c = 0; c < r; ++c)
std::swap(a[r][c], a[c][r]);
}
} // end namespace V3D
#endif

View File

@@ -1,391 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_LINEAR_UTILS_H
#define V3D_LINEAR_UTILS_H
#include "Math/v3d_linear.h"
#include <iostream>
namespace V3D
{
template <typename Elem, int Size>
struct InlineVector : public InlineVectorBase<Elem, Size>
{
}; // end struct InlineVector
template <typename Elem>
struct Vector : public VectorBase<Elem>
{
Vector()
: VectorBase<Elem>()
{ }
Vector(unsigned int size)
: VectorBase<Elem>(size)
{ }
Vector(unsigned int size, Elem * values)
: VectorBase<Elem>(size, values)
{ }
Vector(Vector<Elem> const& a)
: VectorBase<Elem>(a)
{ }
Vector<Elem>& operator=(Vector<Elem> const& a)
{
(VectorBase<Elem>::operator=)(a);
return *this;
}
Vector<Elem>& operator+=(Vector<Elem> const& rhs)
{
addVectorsIP(rhs, *this);
return *this;
}
Vector<Elem>& operator*=(Elem scale)
{
scaleVectorIP(scale, *this);
return *this;
}
Vector<Elem> operator+(Vector<Elem> const& rhs) const
{
Vector<Elem> res(this->size());
addVectors(*this, rhs, res);
return res;
}
Vector<Elem> operator-(Vector<Elem> const& rhs) const
{
Vector<Elem> res(this->size());
subtractVectors(*this, rhs, res);
return res;
}
Elem operator*(Vector<Elem> const& rhs) const
{
return innerProduct(*this, rhs);
}
}; // end struct Vector
template <typename Elem, int Rows, int Cols>
struct InlineMatrix : public InlineMatrixBase<Elem, Rows, Cols>
{
}; // end struct InlineMatrix
template <typename Elem>
struct Matrix : public MatrixBase<Elem>
{
Matrix()
: MatrixBase<Elem>()
{ }
Matrix(unsigned int rows, unsigned int cols)
: MatrixBase<Elem>(rows, cols)
{ }
Matrix(unsigned int rows, unsigned int cols, Elem * values)
: MatrixBase<Elem>(rows, cols, values)
{ }
Matrix(Matrix<Elem> const& a)
: MatrixBase<Elem>(a)
{ }
Matrix<Elem>& operator=(Matrix<Elem> const& a)
{
(MatrixBase<Elem>::operator=)(a);
return *this;
}
Matrix<Elem>& operator+=(Matrix<Elem> const& rhs)
{
addMatricesIP(rhs, *this);
return *this;
}
Matrix<Elem>& operator*=(Elem scale)
{
scaleMatrixIP(scale, *this);
return *this;
}
Matrix<Elem> operator+(Matrix<Elem> const& rhs) const
{
Matrix<Elem> res(this->num_rows(), this->num_cols());
addMatrices(*this, rhs, res);
return res;
}
Matrix<Elem> operator-(Matrix<Elem> const& rhs) const
{
Matrix<Elem> res(this->num_rows(), this->num_cols());
subtractMatrices(*this, rhs, res);
return res;
}
}; // end struct Matrix
//----------------------------------------------------------------------
typedef InlineVector<float, 2> Vector2f;
typedef InlineVector<double, 2> Vector2d;
typedef InlineVector<float, 3> Vector3f;
typedef InlineVector<double, 3> Vector3d;
typedef InlineVector<float, 4> Vector4f;
typedef InlineVector<double, 4> Vector4d;
typedef InlineMatrix<float, 2, 2> Matrix2x2f;
typedef InlineMatrix<double, 2, 2> Matrix2x2d;
typedef InlineMatrix<float, 3, 3> Matrix3x3f;
typedef InlineMatrix<double, 3, 3> Matrix3x3d;
typedef InlineMatrix<float, 4, 4> Matrix4x4f;
typedef InlineMatrix<double, 4, 4> Matrix4x4d;
typedef InlineMatrix<float, 2, 3> Matrix2x3f;
typedef InlineMatrix<double, 2, 3> Matrix2x3d;
typedef InlineMatrix<float, 3, 4> Matrix3x4f;
typedef InlineMatrix<double, 3, 4> Matrix3x4d;
template <typename Elem>
struct VectorArray
{
VectorArray(unsigned count, unsigned size)
: _count(count), _size(size), _values(0), _vectors(0)
{
unsigned const nTotal = _count * _size;
if (count > 0) _vectors = new Vector<Elem>[count];
if (nTotal > 0) _values = new Elem[nTotal];
for (unsigned i = 0; i < _count; ++i) new (&_vectors[i]) Vector<Elem>(_size, _values + i*_size);
}
VectorArray(unsigned count, unsigned size, Elem initVal)
: _count(count), _size(size), _values(0), _vectors(0)
{
unsigned const nTotal = _count * _size;
if (count > 0) _vectors = new Vector<Elem>[count];
if (nTotal > 0) _values = new Elem[nTotal];
for (unsigned i = 0; i < _count; ++i) new (&_vectors[i]) Vector<Elem>(_size, _values + i*_size);
std::fill(_values, _values + nTotal, initVal);
}
~VectorArray()
{
delete [] _values;
delete [] _vectors;
}
unsigned count() const { return _count; }
unsigned size() const { return _size; }
//! Get the submatrix at position ix
Vector<Elem> const& operator[](unsigned ix) const
{
return _vectors[ix];
}
//! Get the submatrix at position ix
Vector<Elem>& operator[](unsigned ix)
{
return _vectors[ix];
}
protected:
unsigned _count, _size;
Elem * _values;
Vector<Elem> * _vectors;
private:
VectorArray(VectorArray const&);
void operator=(VectorArray const&);
};
template <typename Elem>
struct MatrixArray
{
MatrixArray(unsigned count, unsigned nRows, unsigned nCols)
: _count(count), _rows(nRows), _columns(nCols), _values(0), _matrices(0)
{
unsigned const nTotal = _count * _rows * _columns;
if (count > 0) _matrices = new Matrix<Elem>[count];
if (nTotal > 0) _values = new Elem[nTotal];
for (unsigned i = 0; i < _count; ++i)
new (&_matrices[i]) Matrix<Elem>(_rows, _columns, _values + i*(_rows*_columns));
}
~MatrixArray()
{
delete [] _matrices;
delete [] _values;
}
//! Get the submatrix at position ix
Matrix<Elem> const& operator[](unsigned ix) const
{
return _matrices[ix];
}
//! Get the submatrix at position ix
Matrix<Elem>& operator[](unsigned ix)
{
return _matrices[ix];
}
unsigned count() const { return _count; }
unsigned num_rows() const { return _rows; }
unsigned num_cols() const { return _columns; }
protected:
unsigned _count, _rows, _columns;
Elem * _values;
Matrix<Elem> * _matrices;
private:
MatrixArray(MatrixArray const&);
void operator=(MatrixArray const&);
};
//----------------------------------------------------------------------
template <typename Elem, int Size>
inline InlineVector<Elem, Size>
operator+(InlineVector<Elem, Size> const& v, InlineVector<Elem, Size> const& w)
{
InlineVector<Elem, Size> res;
addVectors(v, w, res);
return res;
}
template <typename Elem, int Size>
inline InlineVector<Elem, Size>
operator-(InlineVector<Elem, Size> const& v, InlineVector<Elem, Size> const& w)
{
InlineVector<Elem, Size> res;
subtractVectors(v, w, res);
return res;
}
template <typename Elem, int Size>
inline InlineVector<Elem, Size>
operator*(Elem scale, InlineVector<Elem, Size> const& v)
{
InlineVector<Elem, Size> res;
scaleVector(scale, v, res);
return res;
}
template <typename Elem, int Rows, int Cols>
inline InlineVector<Elem, Rows>
operator*(InlineMatrix<Elem, Rows, Cols> const& A, InlineVector<Elem, Cols> const& v)
{
InlineVector<Elem, Rows> res;
multiply_A_v(A, v, res);
return res;
}
template <typename Elem, int RowsA, int ColsA, int ColsB>
inline InlineMatrix<Elem, RowsA, ColsB>
operator*(InlineMatrix<Elem, RowsA, ColsA> const& A, InlineMatrix<Elem, ColsA, ColsB> const& B)
{
InlineMatrix<Elem, RowsA, ColsB> res;
multiply_A_B(A, B, res);
return res;
}
template <typename Elem, int Rows, int Cols>
inline InlineMatrix<Elem, Cols, Rows>
transposedMatrix(InlineMatrix<Elem, Rows, Cols> const& A)
{
InlineMatrix<Elem, Cols, Rows> At;
makeTransposedMatrix(A, At);
return At;
}
template <typename Elem>
inline InlineMatrix<Elem, 3, 3>
invertedMatrix(InlineMatrix<Elem, 3, 3> const& A)
{
Elem a = A[0][0], b = A[0][1], c = A[0][2];
Elem d = A[1][0], e = A[1][1], f = A[1][2];
Elem g = A[2][0], h = A[2][1], i = A[2][2];
Elem const det = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h;
InlineMatrix<Elem, 3, 3> res;
res[0][0] = e*i-f*h; res[0][1] = c*h-b*i; res[0][2] = b*f-c*e;
res[1][0] = f*g-d*i; res[1][1] = a*i-c*g; res[1][2] = c*d-a*f;
res[2][0] = d*h-e*g; res[2][1] = b*g-a*h; res[2][2] = a*e-b*d;
scaleMatrixIP(1.0/det, res);
return res;
}
template <typename Elem>
inline InlineVector<Elem, 2>
makeVector2(Elem a, Elem b)
{
InlineVector<Elem, 2> res;
res[0] = a; res[1] = b;
return res;
}
template <typename Elem>
inline InlineVector<Elem, 3>
makeVector3(Elem a, Elem b, Elem c)
{
InlineVector<Elem, 3> res;
res[0] = a; res[1] = b; res[2] = c;
return res;
}
template <typename Vec>
inline void
displayVector(Vec const& v)
{
using namespace std;
for (int r = 0; r < v.size(); ++r)
cout << v[r] << " ";
cout << endl;
}
template <typename Mat>
inline void
displayMatrix(Mat const& A)
{
using namespace std;
for (int r = 0; r < A.num_rows(); ++r)
{
for (int c = 0; c < A.num_cols(); ++c)
cout << A[r][c] << " ";
cout << endl;
}
}
} // end namespace V3D
#endif

View File

@@ -1,59 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_MATH_UTILITIES_H
#define V3D_MATH_UTILITIES_H
#include "Math/v3d_linear.h"
#include "Math/v3d_linear_utils.h"
#include <vector>
namespace V3D
{
template <typename Vec, typename Mat>
inline void
createRotationMatrixRodriguez(Vec const& omega, Mat& R)
{
assert(omega.size() == 3);
assert(R.num_rows() == 3);
assert(R.num_cols() == 3);
double const theta = norm_L2(omega);
makeIdentityMatrix(R);
if (fabs(theta) > 1e-6)
{
Matrix3x3d J, J2;
makeCrossProductMatrix(omega, J);
multiply_A_B(J, J, J2);
double const c1 = sin(theta)/theta;
double const c2 = (1-cos(theta))/(theta*theta);
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
R[i][j] += c1*J[i][j] + c2*J2[i][j];
}
} // end createRotationMatrixRodriguez()
template <typename T> inline double sqr(T x) { return x*x; }
} // namespace V3D
#endif

View File

@@ -1,955 +0,0 @@
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Math/v3d_optimization.h"
#if defined(V3DLIB_ENABLE_SUITESPARSE)
//# include "COLAMD/Include/colamd.h"
# include "colamd.h"
extern "C"
{
//# include "LDL/Include/ldl.h"
# include "ldl.h"
}
#endif
#include <iostream>
#include <map>
#define USE_BLOCK_REORDERING 1
#define USE_MULTIPLICATIVE_UPDATE 1
using namespace std;
namespace
{
using namespace V3D;
inline double
squaredResidual(VectorArray<double> const& e)
{
int const N = e.count();
int const M = e.size();
double res = 0.0;
for (int n = 0; n < N; ++n)
for (int m = 0; m < M; ++m)
res += e[n][m] * e[n][m];
return res;
} // end squaredResidual()
} // end namespace <>
namespace V3D
{
int optimizerVerbosenessLevel = 0;
#if defined(V3DLIB_ENABLE_SUITESPARSE)
void
SparseLevenbergOptimizer::setupSparseJtJ()
{
int const nVaryingA = _nParametersA - _nNonvaryingA;
int const nVaryingB = _nParametersB - _nNonvaryingB;
int const nVaryingC = _paramDimensionC - _nNonvaryingC;
int const bColumnStart = nVaryingA*_paramDimensionA;
int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
int const nColumns = cColumnStart + nVaryingC;
_jointNonzerosW.clear();
_jointIndexW.resize(_nMeasurements);
#if 1
{
map<pair<int, int>, int> jointNonzeroMap;
for (size_t k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k] - _nNonvaryingA;
int const j = _correspondingParamB[k] - _nNonvaryingB;
if (i >= 0 && j >= 0)
{
map<pair<int, int>, int>::const_iterator p = jointNonzeroMap.find(make_pair(i, j));
if (p == jointNonzeroMap.end())
{
jointNonzeroMap.insert(make_pair(make_pair(i, j), _jointNonzerosW.size()));
_jointIndexW[k] = _jointNonzerosW.size();
_jointNonzerosW.push_back(make_pair(i, j));
}
else
{
_jointIndexW[k] = (*p).second;
} // end if
} // end if
} // end for (k)
} // end scope
#else
for (size_t k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k] - _nNonvaryingA;
int const j = _correspondingParamB[k] - _nNonvaryingB;
if (i >= 0 && j >= 0)
{
_jointIndexW[k] = _jointNonzerosW.size();
_jointNonzerosW.push_back(make_pair(i, j));
}
} // end for (k)
#endif
#if defined(USE_BLOCK_REORDERING)
int const bBlockColumnStart = nVaryingA;
int const cBlockColumnStart = bBlockColumnStart + nVaryingB;
int const nBlockColumns = cBlockColumnStart + ((nVaryingC > 0) ? 1 : 0);
//cout << "nBlockColumns = " << nBlockColumns << endl;
// For the column reordering we treat the columns belonging to one set
// of parameters as one (logical) column.
// Determine non-zeros of JtJ (we forget about the non-zero diagonal for now)
// Only consider nonzeros of Ai^t * Bj induced by the measurements.
vector<pair<int, int> > nz_blockJtJ(_jointNonzerosW.size());
for (int k = 0; k < _jointNonzerosW.size(); ++k)
{
nz_blockJtJ[k].first = _jointNonzerosW[k].second + bBlockColumnStart;
nz_blockJtJ[k].second = _jointNonzerosW[k].first;
}
if (nVaryingC > 0)
{
// We assume that the global unknowns are linked to every other variable.
for (int i = 0; i < nVaryingA; ++i)
nz_blockJtJ.push_back(make_pair(cBlockColumnStart, i));
for (int j = 0; j < nVaryingB; ++j)
nz_blockJtJ.push_back(make_pair(cBlockColumnStart, j + bBlockColumnStart));
} // end if
int const nnzBlock = nz_blockJtJ.size();
vector<int> permBlockJtJ(nBlockColumns + 1);
if (nnzBlock > 0)
{
// cout << "nnzBlock = " << nnzBlock << endl;
CCS_Matrix<int> blockJtJ(nBlockColumns, nBlockColumns, nz_blockJtJ);
// cout << " nz_blockJtJ: " << endl;
// for (size_t k = 0; k < nz_blockJtJ.size(); ++k)
// cout << " " << nz_blockJtJ[k].first << ":" << nz_blockJtJ[k].second << endl;
// cout << endl;
int * colStarts = (int *)blockJtJ.getColumnStarts();
int * rowIdxs = (int *)blockJtJ.getRowIndices();
// cout << "blockJtJ_colStarts = ";
// for (int k = 0; k <= nBlockColumns; ++k) cout << colStarts[k] << " ";
// cout << endl;
// cout << "blockJtJ_rowIdxs = ";
// for (int k = 0; k < nnzBlock; ++k) cout << rowIdxs[k] << " ";
// cout << endl;
int stats[COLAMD_STATS];
symamd(nBlockColumns, rowIdxs, colStarts, &permBlockJtJ[0], (double *) NULL, stats, &calloc, &free);
if (optimizerVerbosenessLevel >= 2) symamd_report(stats);
}
else
{
for (int k = 0; k < permBlockJtJ.size(); ++k) permBlockJtJ[k] = k;
} // end if
// cout << "permBlockJtJ = ";
// for (int k = 0; k < permBlockJtJ.size(); ++k)
// cout << permBlockJtJ[k] << " ";
// cout << endl;
// From the determined symbolic permutation with logical variables, determine the actual ordering
_perm_JtJ.resize(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC + 1);
int curDstCol = 0;
for (int k = 0; k < permBlockJtJ.size()-1; ++k)
{
int const srcCol = permBlockJtJ[k];
if (srcCol < nVaryingA)
{
for (int n = 0; n < _paramDimensionA; ++n)
_perm_JtJ[curDstCol + n] = srcCol*_paramDimensionA + n;
curDstCol += _paramDimensionA;
}
else if (srcCol >= bBlockColumnStart && srcCol < cBlockColumnStart)
{
int const bStart = nVaryingA*_paramDimensionA;
int const j = srcCol - bBlockColumnStart;
for (int n = 0; n < _paramDimensionB; ++n)
_perm_JtJ[curDstCol + n] = bStart + j*_paramDimensionB + n;
curDstCol += _paramDimensionB;
}
else if (srcCol == cBlockColumnStart)
{
int const cStart = nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB;
for (int n = 0; n < nVaryingC; ++n)
_perm_JtJ[curDstCol + n] = cStart + n;
curDstCol += nVaryingC;
}
else
{
cerr << "Should not reach " << __FILE__ << ":" << __LINE__ << "!" << endl;
assert(false);
}
}
#else
vector<pair<int, int> > nz, nzL;
this->serializeNonZerosJtJ(nz);
for (int k = 0; k < nz.size(); ++k)
{
// Swap rows and columns, since serializeNonZerosJtJ() generates the
// upper triangular part but symamd wants the lower triangle.
nzL.push_back(make_pair(nz[k].second, nz[k].first));
}
_perm_JtJ.resize(nColumns+1);
if (nzL.size() > 0)
{
CCS_Matrix<int> symbJtJ(nColumns, nColumns, nzL);
int * colStarts = (int *)symbJtJ.getColumnStarts();
int * rowIdxs = (int *)symbJtJ.getRowIndices();
// cout << "symbJtJ_colStarts = ";
// for (int k = 0; k <= nColumns; ++k) cout << colStarts[k] << " ";
// cout << endl;
// cout << "symbJtJ_rowIdxs = ";
// for (int k = 0; k < nzL.size(); ++k) cout << rowIdxs[k] << " ";
// cout << endl;
int stats[COLAMD_STATS];
symamd(nColumns, rowIdxs, colStarts, &_perm_JtJ[0], (double *) NULL, stats, &calloc, &free);
if (optimizerVerbosenessLevel >= 2) symamd_report(stats);
}
else
{
for (int k = 0; k < _perm_JtJ.size(); ++k) _perm_JtJ[k] = k;
} // end if
#endif
_perm_JtJ.back() = _perm_JtJ.size() - 1;
// cout << "_perm_JtJ = ";
// for (int k = 0; k < _perm_JtJ.size(); ++k) cout << _perm_JtJ[k] << " ";
// cout << endl;
// Finally, compute the inverse of the full permutation.
_invPerm_JtJ.resize(_perm_JtJ.size());
for (size_t k = 0; k < _perm_JtJ.size(); ++k)
_invPerm_JtJ[_perm_JtJ[k]] = k;
vector<pair<int, int> > nz_JtJ;
this->serializeNonZerosJtJ(nz_JtJ);
for (int k = 0; k < nz_JtJ.size(); ++k)
{
int const i = nz_JtJ[k].first;
int const j = nz_JtJ[k].second;
int pi = _invPerm_JtJ[i];
int pj = _invPerm_JtJ[j];
// Swap values if in lower triangular part
if (pi > pj) std::swap(pi, pj);
nz_JtJ[k].first = pi;
nz_JtJ[k].second = pj;
}
int const nnz = nz_JtJ.size();
// cout << "nz_JtJ = ";
// for (int k = 0; k < nnz; ++k) cout << nz_JtJ[k].first << ":" << nz_JtJ[k].second << " ";
// cout << endl;
_JtJ.create(nColumns, nColumns, nz_JtJ);
// cout << "_colStart_JtJ = ";
// for (int k = 0; k < _JtJ.num_cols(); ++k) cout << _JtJ.getColumnStarts()[k] << " ";
// cout << endl;
// cout << "_rowIdxs_JtJ = ";
// for (int k = 0; k < nnz; ++k) cout << _JtJ.getRowIndices()[k] << " ";
// cout << endl;
vector<int> workFlags(nColumns);
_JtJ_Lp.resize(nColumns+1);
_JtJ_Parent.resize(nColumns);
_JtJ_Lnz.resize(nColumns);
ldl_symbolic(nColumns, (int *)_JtJ.getColumnStarts(), (int *)_JtJ.getRowIndices(),
&_JtJ_Lp[0], &_JtJ_Parent[0], &_JtJ_Lnz[0],
&workFlags[0], NULL, NULL);
if (optimizerVerbosenessLevel >= 1)
cout << "SparseLevenbergOptimizer: Nonzeros in LDL decomposition: " << _JtJ_Lp[nColumns] << endl;
} // end SparseLevenbergOptimizer::setupSparseJtJ()
void
SparseLevenbergOptimizer::serializeNonZerosJtJ(vector<pair<int, int> >& dst) const
{
int const nVaryingA = _nParametersA - _nNonvaryingA;
int const nVaryingB = _nParametersB - _nNonvaryingB;
int const nVaryingC = _paramDimensionC - _nNonvaryingC;
int const bColumnStart = nVaryingA*_paramDimensionA;
int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
dst.clear();
// Add the diagonal block matrices (only the upper triangular part).
// Ui submatrices of JtJ
for (int i = 0; i < nVaryingA; ++i)
{
int const i0 = i * _paramDimensionA;
for (int c = 0; c < _paramDimensionA; ++c)
for (int r = 0; r <= c; ++r)
dst.push_back(make_pair(i0 + r, i0 + c));
}
// Vj submatrices of JtJ
for (int j = 0; j < nVaryingB; ++j)
{
int const j0 = j*_paramDimensionB + bColumnStart;
for (int c = 0; c < _paramDimensionB; ++c)
for (int r = 0; r <= c; ++r)
dst.push_back(make_pair(j0 + r, j0 + c));
}
// Z submatrix of JtJ
for (int c = 0; c < nVaryingC; ++c)
for (int r = 0; r <= c; ++r)
dst.push_back(make_pair(cColumnStart + r, cColumnStart + c));
// Add the elements i and j linked by an observation k
// W submatrix of JtJ
for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
{
int const i0 = _jointNonzerosW[n].first;
int const j0 = _jointNonzerosW[n].second;
int const r0 = i0*_paramDimensionA;
int const c0 = j0*_paramDimensionB + bColumnStart;
for (int r = 0; r < _paramDimensionA; ++r)
for (int c = 0; c < _paramDimensionB; ++c)
dst.push_back(make_pair(r0 + r, c0 + c));
} // end for (n)
if (nVaryingC > 0)
{
// Finally, add the dense columns linking i (resp. j) with the global parameters.
// X submatrix of JtJ
for (int i = 0; i < nVaryingA; ++i)
{
int const i0 = i*_paramDimensionA;
for (int r = 0; r < _paramDimensionA; ++r)
for (int c = 0; c < nVaryingC; ++c)
dst.push_back(make_pair(i0 + r, cColumnStart + c));
}
// Y submatrix of JtJ
for (int j = 0; j < nVaryingB; ++j)
{
int const j0 = j*_paramDimensionB + bColumnStart;
for (int r = 0; r < _paramDimensionB; ++r)
for (int c = 0; c < nVaryingC; ++c)
dst.push_back(make_pair(j0 + r, cColumnStart + c));
}
} // end if
} // end SparseLevenbergOptimizer::serializeNonZerosJtJ()
void
SparseLevenbergOptimizer::fillSparseJtJ(MatrixArray<double> const& Ui,
MatrixArray<double> const& Vj,
MatrixArray<double> const& Wn,
Matrix<double> const& Z,
Matrix<double> const& X,
Matrix<double> const& Y)
{
int const nVaryingA = _nParametersA - _nNonvaryingA;
int const nVaryingB = _nParametersB - _nNonvaryingB;
int const nVaryingC = _paramDimensionC - _nNonvaryingC;
int const bColumnStart = nVaryingA*_paramDimensionA;
int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
int const nCols = _JtJ.num_cols();
int const nnz = _JtJ.getNonzeroCount();
// The following has to replicate the procedure as in serializeNonZerosJtJ()
int serial = 0;
double * values = _JtJ.getValues();
int const * destIdxs = _JtJ.getDestIndices();
// Add the diagonal block matrices (only the upper triangular part).
// Ui submatrices of JtJ
for (int i = 0; i < nVaryingA; ++i)
{
int const i0 = i * _paramDimensionA;
for (int c = 0; c < _paramDimensionA; ++c)
for (int r = 0; r <= c; ++r, ++serial)
values[destIdxs[serial]] = Ui[i][r][c];
}
// Vj submatrices of JtJ
for (int j = 0; j < nVaryingB; ++j)
{
int const j0 = j*_paramDimensionB + bColumnStart;
for (int c = 0; c < _paramDimensionB; ++c)
for (int r = 0; r <= c; ++r, ++serial)
values[destIdxs[serial]] = Vj[j][r][c];
}
// Z submatrix of JtJ
for (int c = 0; c < nVaryingC; ++c)
for (int r = 0; r <= c; ++r, ++serial)
values[destIdxs[serial]] = Z[r][c];
// Add the elements i and j linked by an observation k
// W submatrix of JtJ
for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
{
for (int r = 0; r < _paramDimensionA; ++r)
for (int c = 0; c < _paramDimensionB; ++c, ++serial)
values[destIdxs[serial]] = Wn[n][r][c];
} // end for (k)
if (nVaryingC > 0)
{
// Finally, add the dense columns linking i (resp. j) with the global parameters.
// X submatrix of JtJ
for (int i = 0; i < nVaryingA; ++i)
{
int const r0 = i * _paramDimensionA;
for (int r = 0; r < _paramDimensionA; ++r)
for (int c = 0; c < nVaryingC; ++c, ++serial)
values[destIdxs[serial]] = X[r0+r][c];
}
// Y submatrix of JtJ
for (int j = 0; j < nVaryingB; ++j)
{
int const r0 = j * _paramDimensionB;
for (int r = 0; r < _paramDimensionB; ++r)
for (int c = 0; c < nVaryingC; ++c, ++serial)
values[destIdxs[serial]] = Y[r0+r][c];
}
} // end if
} // end SparseLevenbergOptimizer::fillSparseJtJ()
void
SparseLevenbergOptimizer::minimize()
{
status = LEVENBERG_OPTIMIZER_TIMEOUT;
bool computeDerivatives = true;
int const nVaryingA = _nParametersA - _nNonvaryingA;
int const nVaryingB = _nParametersB - _nNonvaryingB;
int const nVaryingC = _paramDimensionC - _nNonvaryingC;
if (nVaryingA == 0 && nVaryingB == 0 && nVaryingC == 0)
{
// No degrees of freedom, nothing to optimize.
status = LEVENBERG_OPTIMIZER_CONVERGED;
return;
}
this->setupSparseJtJ();
Vector<double> weights(_nMeasurements);
MatrixArray<double> Ak(_nMeasurements, _measurementDimension, _paramDimensionA);
MatrixArray<double> Bk(_nMeasurements, _measurementDimension, _paramDimensionB);
MatrixArray<double> Ck(_nMeasurements, _measurementDimension, _paramDimensionC);
MatrixArray<double> Ui(nVaryingA, _paramDimensionA, _paramDimensionA);
MatrixArray<double> Vj(nVaryingB, _paramDimensionB, _paramDimensionB);
// Wn = Ak^t*Bk
MatrixArray<double> Wn(_jointNonzerosW.size(), _paramDimensionA, _paramDimensionB);
Matrix<double> Z(nVaryingC, nVaryingC);
// X = A^t*C
Matrix<double> X(nVaryingA*_paramDimensionA, nVaryingC);
// Y = B^t*C
Matrix<double> Y(nVaryingB*_paramDimensionB, nVaryingC);
VectorArray<double> residuals(_nMeasurements, _measurementDimension);
VectorArray<double> residuals2(_nMeasurements, _measurementDimension);
VectorArray<double> diagUi(nVaryingA, _paramDimensionA);
VectorArray<double> diagVj(nVaryingB, _paramDimensionB);
Vector<double> diagZ(nVaryingC);
VectorArray<double> At_e(nVaryingA, _paramDimensionA);
VectorArray<double> Bt_e(nVaryingB, _paramDimensionB);
Vector<double> Ct_e(nVaryingC);
Vector<double> Jt_e(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
Vector<double> delta(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
Vector<double> deltaPerm(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
VectorArray<double> deltaAi(_nParametersA, _paramDimensionA);
VectorArray<double> deltaBj(_nParametersB, _paramDimensionB);
Vector<double> deltaC(_paramDimensionC);
double err = 0.0;
for (currentIteration = 0; currentIteration < maxIterations; ++currentIteration)
{
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: currentIteration: " << currentIteration << endl;
if (computeDerivatives)
{
this->evalResidual(residuals);
this->fillWeights(residuals, weights);
for (int k = 0; k < _nMeasurements; ++k)
scaleVectorIP(weights[k], residuals[k]);
err = squaredResidual(residuals);
if (optimizerVerbosenessLevel >= 1) cout << "SparseLevenbergOptimizer: |residual|^2 = " << err << endl;
if (optimizerVerbosenessLevel >= 2) cout << "SparseLevenbergOptimizer: lambda = " << lambda << endl;
for (int k = 0; k < residuals.count(); ++k) scaleVectorIP(-1.0, residuals[k]);
this->setupJacobianGathering();
this->fillAllJacobians(weights, Ak, Bk, Ck);
// Compute the different parts of J^t*e
if (nVaryingA > 0)
{
for (int i = 0; i < nVaryingA; ++i) makeZeroVector(At_e[i]);
Vector<double> tmp(_paramDimensionA);
for (int k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k] - _nNonvaryingA;
if (i < 0) continue;
multiply_At_v(Ak[k], residuals[k], tmp);
addVectors(tmp, At_e[i], At_e[i]);
} // end for (k)
} // end if
if (nVaryingB > 0)
{
for (int j = 0; j < nVaryingB; ++j) makeZeroVector(Bt_e[j]);
Vector<double> tmp(_paramDimensionB);
for (int k = 0; k < _nMeasurements; ++k)
{
int const j = _correspondingParamB[k] - _nNonvaryingB;
if (j < 0) continue;
multiply_At_v(Bk[k], residuals[k], tmp);
addVectors(tmp, Bt_e[j], Bt_e[j]);
} // end for (k)
} // end if
if (nVaryingC > 0)
{
makeZeroVector(Ct_e);
Vector<double> tmp(_paramDimensionC);
for (int k = 0; k < _nMeasurements; ++k)
{
multiply_At_v(Ck[k], residuals[k], tmp);
for (int l = 0; l < nVaryingC; ++l) Ct_e[l] += tmp[_nNonvaryingC + l];
}
} // end if
int pos = 0;
for (int i = 0; i < nVaryingA; ++i)
for (int l = 0; l < _paramDimensionA; ++l, ++pos)
Jt_e[pos] = At_e[i][l];
for (int j = 0; j < nVaryingB; ++j)
for (int l = 0; l < _paramDimensionB; ++l, ++pos)
Jt_e[pos] = Bt_e[j][l];
for (int l = 0; l < nVaryingC; ++l, ++pos)
Jt_e[pos] = Ct_e[l];
// cout << "Jt_e = ";
// for (int k = 0; k < Jt_e.size(); ++k) cout << Jt_e[k] << " ";
// cout << endl;
if (this->applyGradientStoppingCriteria(norm_Linf(Jt_e)))
{
status = LEVENBERG_OPTIMIZER_CONVERGED;
goto end;
}
// The lhs J^t*J consists of several parts:
//           [ U    W    X ]
//  J^t*J =  [ W^t  V    Y ]
//           [ X^t  Y^t  Z ],
// where U, V and W are block-sparse matrices (due to the sparsity of A and B).
// X, Y and Z contain only a few columns (the number of global parameters).
if (nVaryingA > 0)
{
// Compute Ui
Matrix<double> U(_paramDimensionA, _paramDimensionA);
for (int i = 0; i < nVaryingA; ++i) makeZeroMatrix(Ui[i]);
for (int k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k] - _nNonvaryingA;
if (i < 0) continue;
multiply_At_A(Ak[k], U);
addMatricesIP(U, Ui[i]);
} // end for (k)
} // end if
if (nVaryingB > 0)
{
// Compute Vj
Matrix<double> V(_paramDimensionB, _paramDimensionB);
for (int j = 0; j < nVaryingB; ++j) makeZeroMatrix(Vj[j]);
for (int k = 0; k < _nMeasurements; ++k)
{
int const j = _correspondingParamB[k] - _nNonvaryingB;
if (j < 0) continue;
multiply_At_A(Bk[k], V);
addMatricesIP(V, Vj[j]);
} // end for (k)
} // end if
if (nVaryingC > 0)
{
Matrix<double> ZZ(_paramDimensionC, _paramDimensionC);
Matrix<double> Zsum(_paramDimensionC, _paramDimensionC);
makeZeroMatrix(Zsum);
for (int k = 0; k < _nMeasurements; ++k)
{
multiply_At_A(Ck[k], ZZ);
addMatricesIP(ZZ, Zsum);
} // end for (k)
// Ignore the non-varying parameters
for (int i = 0; i < nVaryingC; ++i)
for (int j = 0; j < nVaryingC; ++j)
Z[i][j] = Zsum[i+_nNonvaryingC][j+_nNonvaryingC];
} // end if
if (nVaryingA > 0 && nVaryingB > 0)
{
for (int n = 0; n < Wn.count(); ++n) makeZeroMatrix(Wn[n]);
Matrix<double> W(_paramDimensionA, _paramDimensionB);
for (int k = 0; k < _nMeasurements; ++k)
{
int const n = _jointIndexW[k];
if (n >= 0)
{
int const i0 = _jointNonzerosW[n].first;
int const j0 = _jointNonzerosW[n].second;
multiply_At_B(Ak[k], Bk[k], W);
addMatricesIP(W, Wn[n]);
} // end if
} // end for (k)
} // end if
if (nVaryingA > 0 && nVaryingC > 0)
{
Matrix<double> XX(_paramDimensionA, _paramDimensionC);
makeZeroMatrix(X);
for (int k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k] - _nNonvaryingA;
// Ignore the non-varying parameters
if (i < 0) continue;
multiply_At_B(Ak[k], Ck[k], XX);
for (int r = 0; r < _paramDimensionA; ++r)
for (int c = 0; c < nVaryingC; ++c)
X[r+i*_paramDimensionA][c] += XX[r][c+_nNonvaryingC];
} // end for (k)
} // end if
if (nVaryingB > 0 && nVaryingC > 0)
{
Matrix<double> YY(_paramDimensionB, _paramDimensionC);
makeZeroMatrix(Y);
for (int k = 0; k < _nMeasurements; ++k)
{
int const j = _correspondingParamB[k] - _nNonvaryingB;
// Ignore the non-varying parameters
if (j < 0) continue;
multiply_At_B(Bk[k], Ck[k], YY);
for (int r = 0; r < _paramDimensionB; ++r)
for (int c = 0; c < nVaryingC; ++c)
Y[r+j*_paramDimensionB][c] += YY[r][c+_nNonvaryingC];
} // end for (k)
} // end if
if (currentIteration == 0)
{
// Initialize lambda as tau*max(JtJ[i][i])
double maxEl = -1e30;
if (nVaryingA > 0)
{
for (int i = 0; i < nVaryingA; ++i)
for (int l = 0; l < _paramDimensionA; ++l)
maxEl = std::max(maxEl, Ui[i][l][l]);
}
if (nVaryingB > 0)
{
for (int j = 0; j < nVaryingB; ++j)
for (int l = 0; l < _paramDimensionB; ++l)
maxEl = std::max(maxEl, Vj[j][l][l]);
}
if (nVaryingC > 0)
{
for (int l = 0; l < nVaryingC; ++l)
maxEl = std::max(maxEl, Z[l][l]);
}
lambda = tau * maxEl;
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: initial lambda = " << lambda << endl;
} // end if (currentIteration == 0)
} // end if (computeDerivatives)
for (int i = 0; i < nVaryingA; ++i)
{
for (int l = 0; l < _paramDimensionA; ++l) diagUi[i][l] = Ui[i][l][l];
} // end for (i)
for (int j = 0; j < nVaryingB; ++j)
{
for (int l = 0; l < _paramDimensionB; ++l) diagVj[j][l] = Vj[j][l][l];
} // end for (j)
for (int l = 0; l < nVaryingC; ++l) diagZ[l] = Z[l][l];
// Augment the diagonals with lambda (either by the standard additive update or by multiplication).
#if !defined(USE_MULTIPLICATIVE_UPDATE)
for (int i = 0; i < nVaryingA; ++i)
for (unsigned l = 0; l < _paramDimensionA; ++l)
Ui[i][l][l] += lambda;
for (int j = 0; j < nVaryingB; ++j)
for (unsigned l = 0; l < _paramDimensionB; ++l)
Vj[j][l][l] += lambda;
for (unsigned l = 0; l < nVaryingC; ++l)
Z[l][l] += lambda;
#else
for (int i = 0; i < nVaryingA; ++i)
for (unsigned l = 0; l < _paramDimensionA; ++l)
Ui[i][l][l] = std::max(Ui[i][l][l] * (1.0 + lambda), 1e-15);
for (int j = 0; j < nVaryingB; ++j)
for (unsigned l = 0; l < _paramDimensionB; ++l)
Vj[j][l][l] = std::max(Vj[j][l][l] * (1.0 + lambda), 1e-15);
for (unsigned l = 0; l < nVaryingC; ++l)
Z[l][l] = std::max(Z[l][l] * (1.0 + lambda), 1e-15);
#endif
this->fillSparseJtJ(Ui, Vj, Wn, Z, X, Y);
bool success = true;
double rho = 0.0;
{
int const nCols = _JtJ_Parent.size();
int const nnz = _JtJ.getNonzeroCount();
int const lnz = _JtJ_Lp.back();
vector<int> Li(lnz);
vector<double> Lx(lnz);
vector<double> D(nCols), Y(nCols);
vector<int> workPattern(nCols), workFlag(nCols);
int * colStarts = (int *)_JtJ.getColumnStarts();
int * rowIdxs = (int *)_JtJ.getRowIndices();
double * values = _JtJ.getValues();
int const d = ldl_numeric(nCols, colStarts, rowIdxs, values,
&_JtJ_Lp[0], &_JtJ_Parent[0], &_JtJ_Lnz[0],
&Li[0], &Lx[0], &D[0],
&Y[0], &workPattern[0], &workFlag[0],
NULL, NULL);
if (d == nCols)
{
ldl_perm(nCols, &deltaPerm[0], &Jt_e[0], &_perm_JtJ[0]);
ldl_lsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
ldl_dsolve(nCols, &deltaPerm[0], &D[0]);
ldl_ltsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
ldl_permt(nCols, &delta[0], &deltaPerm[0], &_perm_JtJ[0]);
}
else
{
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: LDL decomposition failed. Increasing lambda." << endl;
success = false;
}
}
if (success)
{
double const deltaSqrLength = sqrNorm_L2(delta);
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: ||delta||^2 = " << deltaSqrLength << endl;
double const paramLength = this->getParameterLength();
if (this->applyUpdateStoppingCriteria(paramLength, sqrt(deltaSqrLength)))
{
status = LEVENBERG_OPTIMIZER_SMALL_UPDATE;
goto end;
}
// Copy the updates from delta to the respective arrays
int pos = 0;
for (int i = 0; i < _nNonvaryingA; ++i) makeZeroVector(deltaAi[i]);
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
for (int l = 0; l < _paramDimensionA; ++l, ++pos)
deltaAi[i][l] = delta[pos];
for (int j = 0; j < _nNonvaryingB; ++j) makeZeroVector(deltaBj[j]);
for (int j = _nNonvaryingB; j < _nParametersB; ++j)
for (int l = 0; l < _paramDimensionB; ++l, ++pos)
deltaBj[j][l] = delta[pos];
makeZeroVector(deltaC);
for (int l = _nNonvaryingC; l < _paramDimensionC; ++l, ++pos)
deltaC[l] = delta[pos];
saveAllParameters();
if (nVaryingA > 0) updateParametersA(deltaAi);
if (nVaryingB > 0) updateParametersB(deltaBj);
if (nVaryingC > 0) updateParametersC(deltaC);
this->evalResidual(residuals2);
for (int k = 0; k < _nMeasurements; ++k)
scaleVectorIP(weights[k], residuals2[k]);
double const newErr = squaredResidual(residuals2);
rho = err - newErr;
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: |new residual|^2 = " << newErr << endl;
#if !defined(USE_MULTIPLICATIVE_UPDATE)
double const denom1 = lambda * deltaSqrLength;
#else
double denom1 = 0.0;
for (int i = _nNonvaryingA; i < _nParametersA; ++i)
for (int l = 0; l < _paramDimensionA; ++l)
denom1 += deltaAi[i][l] * deltaAi[i][l] * diagUi[i-_nNonvaryingA][l];
for (int j = _nNonvaryingB; j < _nParametersB; ++j)
for (int l = 0; l < _paramDimensionB; ++l)
denom1 += deltaBj[j][l] * deltaBj[j][l] * diagVj[j-_nNonvaryingB][l];
for (int l = _nNonvaryingC; l < _paramDimensionC; ++l)
denom1 += deltaC[l] * deltaC[l] * diagZ[l-_nNonvaryingC];
denom1 *= lambda;
#endif
double const denom2 = innerProduct(delta, Jt_e);
rho = rho / (denom1 + denom2);
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: rho = " << rho
<< " denom1 = " << denom1 << " denom2 = " << denom2 << endl;
} // end if (success)
if (success && rho > 0)
{
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: Improved solution - decreasing lambda." << endl;
// Improvement in the new solution
decreaseLambda(rho);
computeDerivatives = true;
}
else
{
if (optimizerVerbosenessLevel >= 2)
cout << "SparseLevenbergOptimizer: Inferior solution - increasing lambda." << endl;
restoreAllParameters();
increaseLambda();
computeDerivatives = false;
// Restore diagonal elements in Ui, Vj and Z.
for (int i = 0; i < nVaryingA; ++i)
{
for (int l = 0; l < _paramDimensionA; ++l) Ui[i][l][l] = diagUi[i][l];
} // end for (i)
for (int j = 0; j < nVaryingB; ++j)
{
for (int l = 0; l < _paramDimensionB; ++l) Vj[j][l][l] = diagVj[j][l];
} // end for (j)
for (int l = 0; l < nVaryingC; ++l) Z[l][l] = diagZ[l];
} // end if
} // end for
end:;
if (optimizerVerbosenessLevel >= 2)
cout << "Leaving SparseLevenbergOptimizer::minimize()." << endl;
} // end SparseLevenbergOptimizer::minimize()
#endif // defined(V3DLIB_ENABLE_SUITESPARSE)
} // end namespace V3D

View File

@@ -1,273 +0,0 @@
// -*- C++ -*-
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef V3D_OPTIMIZATION_H
#define V3D_OPTIMIZATION_H
#include "Math/v3d_linear.h"
#include "Math/v3d_mathutilities.h"
#include <vector>
#include <iostream>
namespace V3D
{
enum
{
LEVENBERG_OPTIMIZER_TIMEOUT = 0,
LEVENBERG_OPTIMIZER_SMALL_UPDATE = 1,
LEVENBERG_OPTIMIZER_CONVERGED = 2
};
extern int optimizerVerbosenessLevel;
struct LevenbergOptimizerCommon
{
LevenbergOptimizerCommon()
: status(LEVENBERG_OPTIMIZER_TIMEOUT), currentIteration(0), maxIterations(50),
tau(1e-3), lambda(1e-3),
gradientThreshold(1e-10), updateThreshold(1e-10),
_nu(2.0)
{ }
virtual ~LevenbergOptimizerCommon() {}
// See Madsen et al., "Methods for non-linear least squares problems."
virtual void increaseLambda()
{
lambda *= _nu; _nu *= 2.0;
}
virtual void decreaseLambda(double const rho)
{
double const r = 2*rho - 1.0;
lambda *= std::max(1.0/3.0, 1 - r*r*r);
if (lambda < 1e-10) lambda = 1e-10;
_nu = 2;
}
bool applyGradientStoppingCriteria(double maxGradient) const
{
return maxGradient < gradientThreshold;
}
bool applyUpdateStoppingCriteria(double paramLength, double updateLength) const
{
return updateLength < updateThreshold * (paramLength + updateThreshold);
}
int status;
int currentIteration, maxIterations;
double tau, lambda;
double gradientThreshold, updateThreshold;
protected:
double _nu;
}; // end struct LevenbergOptimizerCommon
# if defined(V3DLIB_ENABLE_SUITESPARSE)
struct SparseLevenbergOptimizer : public LevenbergOptimizerCommon
{
SparseLevenbergOptimizer(int measurementDimension,
int nParametersA, int paramDimensionA,
int nParametersB, int paramDimensionB,
int paramDimensionC,
std::vector<int> const& correspondingParamA,
std::vector<int> const& correspondingParamB)
: LevenbergOptimizerCommon(),
_nMeasurements(correspondingParamA.size()),
_measurementDimension(measurementDimension),
_nParametersA(nParametersA), _paramDimensionA(paramDimensionA),
_nParametersB(nParametersB), _paramDimensionB(paramDimensionB),
_paramDimensionC(paramDimensionC),
_nNonvaryingA(0), _nNonvaryingB(0), _nNonvaryingC(0),
_correspondingParamA(correspondingParamA),
_correspondingParamB(correspondingParamB)
{
assert(correspondingParamA.size() == correspondingParamB.size());
}
~SparseLevenbergOptimizer() { }
void setNonvaryingCounts(int nNonvaryingA, int nNonvaryingB, int nNonvaryingC)
{
_nNonvaryingA = nNonvaryingA;
_nNonvaryingB = nNonvaryingB;
_nNonvaryingC = nNonvaryingC;
}
void getNonvaryingCounts(int& nNonvaryingA, int& nNonvaryingB, int& nNonvaryingC) const
{
nNonvaryingA = _nNonvaryingA;
nNonvaryingB = _nNonvaryingB;
nNonvaryingC = _nNonvaryingC;
}
void minimize();
virtual void evalResidual(VectorArray<double>& residuals) = 0;
virtual void fillWeights(VectorArray<double> const& residuals, Vector<double>& w)
{
(void)residuals;
std::fill(w.begin(), w.end(), 1.0);
}
void fillAllJacobians(Vector<double> const& w,
MatrixArray<double>& Ak,
MatrixArray<double>& Bk,
MatrixArray<double>& Ck)
{
int const nVaryingA = _nParametersA - _nNonvaryingA;
int const nVaryingB = _nParametersB - _nNonvaryingB;
int const nVaryingC = _paramDimensionC - _nNonvaryingC;
for (unsigned k = 0; k < _nMeasurements; ++k)
{
int const i = _correspondingParamA[k];
int const j = _correspondingParamB[k];
if (i < _nNonvaryingA && j < _nNonvaryingB) continue;
fillJacobians(Ak[k], Bk[k], Ck[k], i, j, k);
} // end for (k)
if (nVaryingA > 0)
{
for (unsigned k = 0; k < _nMeasurements; ++k)
scaleMatrixIP(w[k], Ak[k]);
}
if (nVaryingB > 0)
{
for (unsigned k = 0; k < _nMeasurements; ++k)
scaleMatrixIP(w[k], Bk[k]);
}
if (nVaryingC > 0)
{
for (unsigned k = 0; k < _nMeasurements; ++k)
scaleMatrixIP(w[k], Ck[k]);
}
} // end fillAllJacobians()
virtual void setupJacobianGathering() { }
virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
int i, int j, int k) = 0;
virtual double getParameterLength() const = 0;
virtual void updateParametersA(VectorArray<double> const& deltaAi) = 0;
virtual void updateParametersB(VectorArray<double> const& deltaBj) = 0;
virtual void updateParametersC(Vector<double> const& deltaC) = 0;
virtual void saveAllParameters() = 0;
virtual void restoreAllParameters() = 0;
protected:
void serializeNonZerosJtJ(std::vector<std::pair<int, int> >& dst) const;
void setupSparseJtJ();
void fillSparseJtJ(MatrixArray<double> const& Ui, MatrixArray<double> const& Vj, MatrixArray<double> const& Wk,
Matrix<double> const& Z, Matrix<double> const& X, Matrix<double> const& Y);
int const _nMeasurements, _measurementDimension;
int const _nParametersA, _paramDimensionA;
int const _nParametersB, _paramDimensionB;
int const _paramDimensionC;
int _nNonvaryingA, _nNonvaryingB, _nNonvaryingC;
std::vector<int> const& _correspondingParamA;
std::vector<int> const& _correspondingParamB;
std::vector<std::pair<int, int> > _jointNonzerosW;
std::vector<int> _jointIndexW;
std::vector<int> _JtJ_Lp, _JtJ_Parent, _JtJ_Lnz;
std::vector<int> _perm_JtJ, _invPerm_JtJ;
CCS_Matrix<double> _JtJ;
}; // end struct SparseLevenbergOptimizer
struct StdSparseLevenbergOptimizer : public SparseLevenbergOptimizer
{
StdSparseLevenbergOptimizer(int measurementDimension,
int nParametersA, int paramDimensionA,
int nParametersB, int paramDimensionB,
int paramDimensionC,
std::vector<int> const& correspondingParamA,
std::vector<int> const& correspondingParamB)
: SparseLevenbergOptimizer(measurementDimension, nParametersA, paramDimensionA,
nParametersB, paramDimensionB, paramDimensionC,
correspondingParamA, correspondingParamB),
curParametersA(nParametersA, paramDimensionA), savedParametersA(nParametersA, paramDimensionA),
curParametersB(nParametersB, paramDimensionB), savedParametersB(nParametersB, paramDimensionB),
curParametersC(paramDimensionC), savedParametersC(paramDimensionC)
{ }
virtual double getParameterLength() const
{
double res = 0.0;
for (int i = 0; i < _nParametersA; ++i) res += sqrNorm_L2(curParametersA[i]);
for (int j = 0; j < _nParametersB; ++j) res += sqrNorm_L2(curParametersB[j]);
res += sqrNorm_L2(curParametersC);
return sqrt(res);
}
virtual void updateParametersA(VectorArray<double> const& deltaAi)
{
for (int i = 0; i < _nParametersA; ++i) addVectors(deltaAi[i], curParametersA[i], curParametersA[i]);
}
virtual void updateParametersB(VectorArray<double> const& deltaBj)
{
for (int j = 0; j < _nParametersB; ++j) addVectors(deltaBj[j], curParametersB[j], curParametersB[j]);
}
virtual void updateParametersC(Vector<double> const& deltaC)
{
addVectors(deltaC, curParametersC, curParametersC);
}
virtual void saveAllParameters()
{
for (int i = 0; i < _nParametersA; ++i) savedParametersA[i] = curParametersA[i];
for (int j = 0; j < _nParametersB; ++j) savedParametersB[j] = curParametersB[j];
savedParametersC = curParametersC;
}
virtual void restoreAllParameters()
{
for (int i = 0; i < _nParametersA; ++i) curParametersA[i] = savedParametersA[i];
for (int j = 0; j < _nParametersB; ++j) curParametersB[j] = savedParametersB[j];
curParametersC = savedParametersC;
}
VectorArray<double> curParametersA, savedParametersA;
VectorArray<double> curParametersB, savedParametersB;
Vector<double> curParametersC, savedParametersC;
}; // end struct StdSparseLevenbergOptimizer
# endif
} // end namespace V3D
#endif

View File

@@ -1,92 +0,0 @@
Description
This is an implementation of a sparse Levenberg-Marquardt optimization
procedure and several bundle adjustment modules based on it. There are three
versions of bundle adjustment:
1) Pure metric adjustment. Camera poses have 6 dof and 3D points have 3 dof.
2) Common, but adjustable intrinsic and distortion parameters. This is useful,
if the set of images are taken with the same camera under constant zoom
settings.
3) Variable intrinsics and distortion parameters for each view. This addresses
the "community photo collection" setting, where each image is captured with
a different camera and/or with varying zoom setting.
There are two demo applications in the Apps directory, bundle_common and
bundle_varying, which correspond to item 2) and 3) above.
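For orientation, the optimizer declared in Math/v3d_optimization.h above splits
the unknowns into per-camera parameters (block A), per-point parameters
(block B) and global parameters (block C). The sketch below shows how a pure
metric problem (item 1) could be wired onto it, assuming the library is built
with V3DLIB_ENABLE_SUITESPARSE; the class name MyBundleProblem, the argument
names and the stubbed bodies are illustrative only and do not reproduce the
code of the demo applications.

    #include "Math/v3d_optimization.h"

    // Illustrative subclass: 2D reprojection residuals, 6-dof camera poses
    // (block A), 3-dof points (block B), no global parameters (block C empty).
    struct MyBundleProblem : public V3D::StdSparseLevenbergOptimizer
    {
        MyBundleProblem(int nCameras, int nPoints,
                        std::vector<int> const& camOfMeasurement,
                        std::vector<int> const& pointOfMeasurement)
            : V3D::StdSparseLevenbergOptimizer(2, nCameras, 6, nPoints, 3, 0,
                                               camOfMeasurement, pointOfMeasurement)
        { }

        virtual void evalResidual(V3D::VectorArray<double>& e)
        {
            // Stub: fill e[k] with the k-th reprojection error.
            (void)e;
        }

        virtual void fillJacobians(V3D::Matrix<double>& Ak, V3D::Matrix<double>& Bk,
                                   V3D::Matrix<double>& Ck, int i, int j, int k)
        {
            // Stub: Jacobians of measurement k w.r.t. camera i, point j and globals.
            (void)Ak; (void)Bk; (void)Ck; (void)i; (void)j; (void)k;
        }
    };

The correspondingParamA/B vectors simply record, for every 2D measurement,
which camera and which 3D point produced it; this is what determines the
sparsity pattern of J^t*J built in setupSparseJtJ().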
The input data file for both applications is a text file with the following
numerical values:
First, the number of 3D points, views and 2D measurements:
<M> <N> <K>
Then, the values of the intrinsic matrix
        [ fx skew cx ]
    K = [  0   fy  cy ]
        [  0    0   1 ],
and the distortion parameters according to the convention of the Bouguet
toolbox:
<fx> <skew> <cx> <fy> <cy> <k1> <k2> <p1> <p2>
For the bundle_varying application this is given <N> times, one for each
camera/view.
Then the <M> 3D point positions are given:
<point-id> <X> <Y> <Z>
Note: the point-ids need not be exactly 0 to M-1; any (unique) ids
will do.
The camera poses are given subsequently:
<view-id> <12 entries of the RT matrix>
There is a lot of confusion about how to specify the orientation of cameras. We
use projection matrix notation, i.e. P = K [R|T], and a 3D point X in world
coordinates is transformed into the camera coordinate system by XX = R*X + T.
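Read concretely (and ignoring distortion), the convention can be written with
the inline types from Math/v3d_linear_utils.h above roughly as follows; the
helper name projectPoint is illustrative and not part of the library:

    #include "Math/v3d_linear_utils.h"
    using namespace V3D;

    // Pixel coordinates of world point X seen by a camera with intrinsics K,
    // rotation R and translation T, following XX = R*X + T and x ~ K*XX.
    inline Vector2d projectPoint(Matrix3x3d const& K, Matrix3x3d const& R,
                                 Vector3d const& T, Vector3d const& X)
    {
        Vector3d XX = R*X + T;          // point in camera coordinates
        Vector3d p  = K*XX;             // homogeneous pixel coordinates
        return makeVector2(p[0]/p[2], p[1]/p[2]);
    }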
Finally, the <K> 2D image measurements (given in pixels) are provided:
<view-id> <point-id> <x> <y> 1
See the example in the Dataset folder.
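For illustration only (the numbers below are made up, and the 12 RT entries
are assumed to be written row-major), a tiny bundle_common-style file with 2
points, 1 view and 2 measurements could look like this:

    2 1 2
    1000.0 0.0 320.0 1000.0 240.0 0.0 0.0 0.0 0.0
    0 -0.1 0.2 5.0
    1  0.3 0.1 6.5
    0  1 0 0 0  0 1 0 0  0 0 1 0
    0 0 300.0 280.0 1
    0 1 366.2 255.4 1

The two measurement rows are consistent (up to rounding) with projecting the
two points through the identity pose with these intrinsics and zero distortion.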
Performance
This software is able to perform successful loop closing for a video sequence
containing 1745 views, 37920 3D points and 627228 image measurements in about
16 minutes on a 2.2 GHz Core 2. The memory footprint was below 700 MB.
Requirements
Solving the augmented normal equations in the LM optimizer is done with LDL, a
Cholesky-like decomposition method for sparse matrices (see
http://www.cise.ufl.edu/research/sparse/ldl). The appropriate column
reordering is done with COLAMD (see
http://www.cise.ufl.edu/research/sparse/colamd). Both packages are licensed
under the GNU LGPL.
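How these pieces fit together can be seen in SparseLevenbergOptimizer::minimize()
above; with the fill-reducing permutation stripped out, solving one augmented
system A*delta = J^t*e with LDL boils down to the sequence sketched here (the
wrapper name solveNormalEquations is illustrative):

    #include <vector>
    extern "C" {
    #include "ldl.h"
    }

    // Solve A*x = b for a sparse symmetric matrix A given in compressed-column
    // form (upper triangle only). On entry x holds b, on exit the solution.
    // Returns false if the LDL^t factorization breaks down.
    bool solveNormalEquations(int n, int* Ap, int* Ai, double* Ax, double* x)
    {
        std::vector<int> Lp(n+1), Parent(n), Lnz(n), Flag(n), Pattern(n);
        ldl_symbolic(n, Ap, Ai, &Lp[0], &Parent[0], &Lnz[0], &Flag[0], NULL, NULL);

        std::vector<int>    Li(Lp[n]);
        std::vector<double> Lx(Lp[n]), D(n), Y(n);
        int const d = ldl_numeric(n, Ap, Ai, Ax, &Lp[0], &Parent[0], &Lnz[0],
                                  &Li[0], &Lx[0], &D[0], &Y[0],
                                  &Pattern[0], &Flag[0], NULL, NULL);
        if (d != n) return false;                   // singular pivot encountered

        ldl_lsolve(n, x, &Lp[0], &Li[0], &Lx[0]);   // x = L \ b
        ldl_dsolve(n, x, &D[0]);                    // x = D \ x
        ldl_ltsolve(n, x, &Lp[0], &Li[0], &Lx[0]);  // x = L^t \ x
        return true;
    }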
This software was developed under Linux, but should compile equally well on
other operating systems.
-Christopher Zach (cmzach@cs.unc.edu)
/*
Copyright (c) 2008 University of North Carolina at Chapel Hill
This file is part of SSBA (Simple Sparse Bundle Adjustment).
SSBA is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with SSBA. If not, see <http://www.gnu.org/licenses/>.
*/

View File

@@ -1,23 +0,0 @@
Project: SSBA
URL: http://www.cs.unc.edu/~cmzach/opensource.html
License: LGPL3
Upstream version: 1.0
Local modifications:
* Added
SET(CMAKE_CXX_FLAGS "")
to CMakeLists.txt to prevent warnings from being treated as errors.
* Fixed "unused variable" warnings in the header files. Warnings in the cpp
files are still there.
* Fixed a bug in CameraMatrix::opticalAxis() in file
Geometry/v3d_cameramatrix.h
* Deleted the Dataset directory.
* Added '#include <string>' to ssba/Apps/bundle_common.cpp and
ssba/Apps/bundle_varying.cpp to stop undefined references to strcmp
* Removed unnecessary elements from the CMakeLists.txt file, including the
obsoleted local_config.cmake and friends.
* Added a virtual destructor to V3D::LevenbergOptimizerCommon in
Math/v3d_optimization.h
* Added /EHsc WIN32-specific flag to CMakeLists.txt
* Removed the unused variable Vector3d np in bundle_common.cpp and bundle_varying.cpp (in the main() function).