Move Libmv from extern/ to intern/

Logically it is an intern library, since it is mainly developed by 1.5 Blender guys.
Sergey Sharybin
2016-01-04 18:22:27 +05:00
parent 6fb6a08bf8
commit ba432299cd
175 changed files with 25 additions and 25 deletions

extern/CMakeLists.txt

@@ -28,7 +28,6 @@ remove_strict_flags()
add_subdirectory(rangetree)
add_subdirectory(wcwidth)
add_subdirectory(libmv)
if(WITH_BULLET)
if(NOT WITH_SYSTEM_BULLET)

extern/libmv/CMakeLists.txt

@@ -1,238 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2011, Blender Foundation
# All rights reserved.
#
# Contributor(s): Blender Foundation,
# Sergey Sharybin
#
# ***** END GPL LICENSE BLOCK *****
# NOTE: This file is automatically generated by the bundle.sh script
# If you're making changes to this file, please update the template
# in that script too
set(INC
.
)
set(INC_SYS
)
set(SRC
libmv-capi.h
)
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
add_definitions(${CERES_DEFINES})
if(WITH_LIBMV)
list(APPEND INC
../gflags/src
../glog/src
third_party/ceres/include
third_party/ceres/config
../../intern/guardedalloc
)
list(APPEND INC_SYS
${EIGEN3_INCLUDE_DIRS}
${PNG_INCLUDE_DIRS}
${ZLIB_INCLUDE_DIRS}
)
add_definitions(
-DWITH_LIBMV_GUARDED_ALLOC
-DLIBMV_NO_FAST_DETECTOR=
)
list(APPEND SRC
intern/autotrack.cc
intern/camera_intrinsics.cc
intern/detector.cc
intern/frame_accessor.cc
intern/homography.cc
intern/image.cc
intern/logging.cc
intern/reconstruction.cc
intern/track_region.cc
intern/tracks.cc
intern/tracksN.cc
libmv/autotrack/autotrack.cc
libmv/autotrack/predict_tracks.cc
libmv/autotrack/tracks.cc
libmv/base/aligned_malloc.cc
libmv/image/array_nd.cc
libmv/image/convolve.cc
libmv/multiview/conditioning.cc
libmv/multiview/euclidean_resection.cc
libmv/multiview/fundamental.cc
libmv/multiview/homography.cc
libmv/multiview/panography.cc
libmv/multiview/panography_kernel.cc
libmv/multiview/projection.cc
libmv/multiview/triangulation.cc
libmv/numeric/numeric.cc
libmv/numeric/poly.cc
libmv/simple_pipeline/bundle.cc
libmv/simple_pipeline/camera_intrinsics.cc
libmv/simple_pipeline/detect.cc
libmv/simple_pipeline/distortion_models.cc
libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/intersect.cc
libmv/simple_pipeline/keyframe_selection.cc
libmv/simple_pipeline/modal_solver.cc
libmv/simple_pipeline/pipeline.cc
libmv/simple_pipeline/reconstruction.cc
libmv/simple_pipeline/reconstruction_scale.cc
libmv/simple_pipeline/resect.cc
libmv/simple_pipeline/tracks.cc
libmv/tracking/brute_region_tracker.cc
libmv/tracking/hybrid_region_tracker.cc
libmv/tracking/klt_region_tracker.cc
libmv/tracking/pyramid_region_tracker.cc
libmv/tracking/retrack_region_tracker.cc
libmv/tracking/track_region.cc
libmv/tracking/trklt_region_tracker.cc
intern/autotrack.h
intern/camera_intrinsics.h
intern/detector.h
intern/frame_accessor.h
intern/homography.h
intern/image.h
intern/logging.h
intern/reconstruction.h
intern/track_region.h
intern/tracks.h
intern/tracksN.h
libmv/autotrack/autotrack.h
libmv/autotrack/callbacks.h
libmv/autotrack/frame_accessor.h
libmv/autotrack/marker.h
libmv/autotrack/model.h
libmv/autotrack/predict_tracks.h
libmv/autotrack/quad.h
libmv/autotrack/reconstruction.h
libmv/autotrack/region.h
libmv/autotrack/tracks.h
libmv/base/aligned_malloc.h
libmv/base/id_generator.h
libmv/base/scoped_ptr.h
libmv/base/vector.h
libmv/base/vector_utils.h
libmv/image/array_nd.h
libmv/image/convolve.h
libmv/image/correlation.h
libmv/image/image_converter.h
libmv/image/image_drawing.h
libmv/image/image.h
libmv/image/sample.h
libmv/image/tuple.h
libmv/logging/logging.h
libmv/multiview/conditioning.h
libmv/multiview/euclidean_resection.h
libmv/multiview/fundamental.h
libmv/multiview/homography_error.h
libmv/multiview/homography.h
libmv/multiview/homography_parameterization.h
libmv/multiview/nviewtriangulation.h
libmv/multiview/panography.h
libmv/multiview/panography_kernel.h
libmv/multiview/projection.h
libmv/multiview/resection.h
libmv/multiview/triangulation.h
libmv/multiview/two_view_kernel.h
libmv/numeric/dogleg.h
libmv/numeric/function_derivative.h
libmv/numeric/levenberg_marquardt.h
libmv/numeric/numeric.h
libmv/numeric/poly.h
libmv/simple_pipeline/bundle.h
libmv/simple_pipeline/callbacks.h
libmv/simple_pipeline/camera_intrinsics.h
libmv/simple_pipeline/camera_intrinsics_impl.h
libmv/simple_pipeline/detect.h
libmv/simple_pipeline/distortion_models.h
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.h
libmv/simple_pipeline/keyframe_selection.h
libmv/simple_pipeline/modal_solver.h
libmv/simple_pipeline/pipeline.h
libmv/simple_pipeline/reconstruction.h
libmv/simple_pipeline/reconstruction_scale.h
libmv/simple_pipeline/resect.h
libmv/simple_pipeline/tracks.h
libmv/tracking/brute_region_tracker.h
libmv/tracking/hybrid_region_tracker.h
libmv/tracking/kalman_filter.h
libmv/tracking/klt_region_tracker.h
libmv/tracking/pyramid_region_tracker.h
libmv/tracking/region_tracker.h
libmv/tracking/retrack_region_tracker.h
libmv/tracking/track_region.h
libmv/tracking/trklt_region_tracker.h
third_party/msinttypes/inttypes.h
third_party/msinttypes/stdint.h
)
if(WITH_GTESTS)
blender_add_lib(libmv_test_dataset "./libmv/multiview/test_data_sets.cc" "" "")
BLENDER_SRC_GTEST("libmv_predict_tracks" "./libmv/autotrack/predict_tracks_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_tracks" "./libmv/autotrack/tracks_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_scoped_ptr" "./libmv/base/scoped_ptr_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_vector" "./libmv/base/vector_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_array_nd" "./libmv/image/array_nd_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_convolve" "./libmv/image/convolve_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_image" "./libmv/image/image_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_sample" "./libmv/image/sample_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_tuple" "./libmv/image/tuple_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_euclidean_resection" "./libmv/multiview/euclidean_resection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_fundamental" "./libmv/multiview/fundamental_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_homography" "./libmv/multiview/homography_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_nviewtriangulation" "./libmv/multiview/nviewtriangulation_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_panography" "./libmv/multiview/panography_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_projection" "./libmv/multiview/projection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_resection" "./libmv/multiview/resection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_triangulation" "./libmv/multiview/triangulation_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_dogleg" "./libmv/numeric/dogleg_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_function_derivative" "./libmv/numeric/function_derivative_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_levenberg_marquardt" "./libmv/numeric/levenberg_marquardt_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_numeric" "./libmv/numeric/numeric_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_poly" "./libmv/numeric/poly_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_camera_intrinsics" "./libmv/simple_pipeline/camera_intrinsics_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_detect" "./libmv/simple_pipeline/detect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_intersect" "./libmv/simple_pipeline/intersect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_keyframe_selection" "./libmv/simple_pipeline/keyframe_selection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_modal_solver" "./libmv/simple_pipeline/modal_solver_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_resect" "./libmv/simple_pipeline/resect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_brute_region_tracker" "./libmv/tracking/brute_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_klt_region_tracker" "./libmv/tracking/klt_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
BLENDER_SRC_GTEST("libmv_pyramid_region_tracker" "./libmv/tracking/pyramid_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
endif()
else()
list(APPEND SRC
intern/stub.cc
)
endif()
blender_add_lib(extern_libmv "${SRC}" "${INC}" "${INC_SYS}")

extern/libmv/ChangeLog vendored (609 lines deleted)

@@ -1,609 +0,0 @@
commit d3537e3709fe11f42312e82cb1c9837c9e742385
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Sun Jan 3 14:17:10 2016 +0500
GLog/GFlags: Reduce difference between upstream and bundled versions
Several things here:
- Re-bundled sources using own fork with pull requests applied to the sources.
- Got rid of changes around include "config.h"; it was needed by Blender to
make its include directories configuration work. This could be addressed
differently from the Blender side.
- Moved some customization to defines set by CMakeLists.
commit 1ec37bba2cfbbf0d6568429fa3035ee2164c23e6
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Sat Jan 2 12:42:55 2016 +0500
GFlags linking errors fix for MSVC
commit df7642b270e8e43685e9ffb404b59d7b226a9f60
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 31 17:56:12 2015 +0500
Alternative fix for missing prototype for a couple of functions
commit 08f685797b7d776cdaa579136c82b15ddc6ffb30
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 31 17:33:05 2015 +0500
Update GFlags to the latest upstream version
Makes it easier to synchronize some compiler/warning fixes.
commit e0ef5b09203e3906a555e6c2010f25cb667da9cd
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 31 16:15:59 2015 +0500
GLog: Solve some compilation warnings
Those are actually sent to a pull-request, see
https://github.com/google/glog/pull/81
commit 2072b213d4d3a55d099a063ed1e7331cc773454e
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 31 16:13:53 2015 +0500
Add Martijn Berger to the AUTHORS file
commit 4dd0770d98d67896e4f936356e281f63d927410e
Author: Martijn Berger <martijn.berger@gmail.com>
Date: Thu Dec 31 16:13:08 2015 +0500
Fix compilation error of Glog and Gflags with MSVC2015
commit 2712f42be2ad79e7d3a6c9905f6d8d1e3b7133ac
Author: Brecht Van Lommel <brechtvanlommel@gmail.com>
Date: Thu Dec 31 14:00:58 2015 +0500
Fix OS X (with 10.11 SDK) glog build errors due to using deprecated code.
Some values are now restored to the ones from before the upgrade today.
commit d249280fdf7c937fd6ebbc465508843a70aafd4c
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 30 16:59:28 2015 +0500
Tweaks to Glog to support building on all platforms
This makes it possible to compile Libmv on all platforms,
the amount of hacks is lower, which could bring some warnings
up, but those are better addressed via upstream, which
is now rather active.
commit 86c57750ddb857643fb5dd2c83b4953da83dd57d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 30 16:15:47 2015 +0500
Enable explicit Schur complement matrix by default
Gives up to a 2x speed-up of the camera solving process in an average scene.
In a really huge one it might be slower, but that still needs to be investigated.
commit d6c52a70b5a0664b7c74bda68f59a895fe8aa235
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 30 16:13:03 2015 +0500
Fix one frame memory leak when tracking last frame
commit 6e2ac41d25d5923b2a62c96d27d919a36eff9b48
Author: Brecht Van Lommel <brechtvanlommel@gmail.com>
Date: Wed Dec 30 16:11:24 2015 +0500
Motion tracking not working with Xcode 7 on OS X.
Caused by use of the uninitialized shape_ variable in Resize().
commit fc72ae06fb4ae559ac37d14d1b34d6669505cc86
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 30 15:56:40 2015 +0500
Update GLog to latest upstream
Should fix issues building with MSVC2015.
commit d4b2d15bd3d195074b074331354de96a1b51042f
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 30 16:01:10 2015 +0500
Fix wrong README file reference
commit 2b4aa0b7720cae9a408284834559bea9960157ee
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 11 02:16:53 2015 -0700
Make README more informative for GitHub viewers
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D1295
commit 514e4491aea655d20be047ed87f002fb7854d5c9
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 11 01:54:09 2015 -0700
Simplify the modal solver Ceres cost function
Fix test by flipping the quaternion.
Reviewers: sergey
Reviewed By: sergey
Projects: #libmv
Differential Revision: https://developer.blender.org/D756
commit e55fafd31f7d53d42af7c6b7df2eebe3c2568da9
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 31 19:05:51 2014 +0500
Synchronize MSVC compilation fixes from Blender
commit 7d6020d2ec42c6cb2749bc891186b4880d26d40b
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Dec 31 15:32:07 2014 +0500
Update GLog to latest upstream revision 143
Mainly to solve compilation error with demangle.cc.
commit 5dc746700eaf85cb674f0fb73ff3c1b49a7f6315
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Dec 12 14:59:55 2014 +0500
Update GFlags to latest release 2.1.1
The main purpose of this (and some of the upcoming) updates is to check if the
upstream sources are usable for us without any modifications. And if
not, then we'll need to consider moving some changes into upstream.
This commit contains a one-to-one copy of the upstream GFlags library
and also changes namespace usage, since it has changed in the upstream.
commit 6fe6d75f7e90e161b44643b953f058a3829a5247
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Sat Nov 1 02:53:36 2014 +0500
Libmv: Code cleanup, mixed class/struct in declaration/definition
commit d2a5f7953812d2d09765431b59c6c4ac72faf35b
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Oct 30 23:13:53 2014 +0500
Libmv: Support disabled color channels in tracking settings
This was never ported to the new tracking pipeline and is now done using
FrameAccessor::Transform routines. Quite straightforward, but I've changed
the order of grayscale conversion on the Blender side with respect to the call
of the transform callback. This way it's much easier to perform rescaling on
the libmv side.
commit d976e034cdf74b34860e0632d7b29713f47c5756
Author: Keir Mierle <mierle@gmail.com>
Date: Sat Aug 23 00:38:01 2014 -0700
Minor keyframe selection cleanups
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D757
commit bc99ca55dadfca89fde0f93764397c2fe028943d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Sat Aug 23 01:55:32 2014 +0600
implement backward prediction
The title actually says it all: just extends the current implementation
of PredictMarkerPosition() to cases when tracking happens in the reverse
order (from the end frame to the start).
It still doesn't solve all the ambiguity happening in the function
in cases when one tracks the feature and then re-tracks it in order
to refine the sliding. This is considered a separate TODO for now and
will likely be solved by passing tracking direction to the prediction
function.
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D663
commit 5b87682d98df65ade02638bc6482d824cf0dd0b3
Author: Keir Mierle <mierle@gmail.com>
Date: Thu Aug 21 22:45:22 2014 -0700
Make libmv compile on Ubuntu 14.04
Reviewers: fsiddi
Reviewed By: fsiddi
Subscribers: sergey
Differential Revision: https://developer.blender.org/D755
commit 0a81db623c458e0384b4f7060d1bcff8993fb469
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Jul 23 00:42:00 2014 +0600
Fix wrong residual blocks counter
This happened in cases with zero-weighted tracks
and could lead to some assert failures when marking a parameter
block constant.
commit 2824dbac54cacf74828678be7a5c9fd960ce83e2
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Jul 18 12:52:03 2014 +0600
Fix search area sliding issue
The only way to do this is to store search region in floats
and round when we need to sample it. Otherwise you'll always
have a sliding effect caused by rounding issues, especially
when doing incremental offsets (the thing which happens in the
prediction code).
Pretty much a straightforward change, apart from stuff to be kept
in mind: the offset calculation in int should happen relative to the
rounded search region. This is because the tracker works in the space
of the search window image, which gets rounded on frame access.
This makes the API a bit creepy, because the frame accessor uses the same
Region struct as the search window in Marker, and ideally we would
need to have either IntRegion or Region<int> in order to let
Libmv fully track what's getting rounded and when.
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D616
commit 04862c479332308be47a0f27361402444ace8880
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 9 23:00:03 2014 +0200
Start the automatic 2D tracking code
This starts the 2D automatic tracking code. It is totally unfinished.
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D523
commit be679f67d807a2139c1f7d7e2ca45141940b30d5
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 9 14:36:04 2014 +0200
Also shift the search window
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D520
commit 66b8f5eef2633ebcde32a388fc14c60171011821
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 9 13:06:28 2014 +0200
Change the search region to absolute frame coordinates
Smarter Eigen usage
Better error logging
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D519
commit a08193319ae409fad8f08887eae1f79f02e91eaa
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 9 12:02:47 2014 +0200
First cut at predictive tracing
This adds a Kalman filter-based approach to predict where a marker
will go in the next frame to track. Hopefully this will make the
tracker work faster by avoiding lengthy searches. This code
compiles, but is otherwise untested, and likely does not work.
Fix else branch
Add some tests
Update patch coordinates as well (and test)
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D518
commit 607ffb2f62b56e34a841abbb952d83e19cd1e23c
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 8 16:05:28 2014 +0200
Add constructor to AutoTrack
commit c39e20a0c27da3733804c3848454b5d4c4f0e66b
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 8 16:04:20 2014 +0200
Fix GetMarker compilation issue
commit 8dd93e431b6e44439c803bfd26ec2669b656177e
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 8 15:50:26 2014 +0200
Expose GetMarker() in AutoTrack
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D516
commit 4405dff60ea08d454b64da1a7c0595d9328cf8a3
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 8 15:38:14 2014 +0200
Add public SetMarkers to AutoTrack
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D515
commit c90837f6db276a3b1f610eaad509155f6a43b24f
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 8 15:17:48 2014 +0200
Make autotrack skeleton compile
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D514
commit be01baa2e82e36f63e548f073157e68d2ff870c0
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 7 18:48:55 2014 +0200
Add preliminary TrackMarkerToFrame in autotrack
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D509
commit 0cab028d591b3d08672ca86eb6c6e4ac1aacf1d0
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed May 7 17:59:11 2014 +0200
Remove assert from ArrayND Resize
That assert broke initialization of arrays which don't
own the data, since the constructor uses Resize to set shape
and strides.
Strides are still to be fixed, but that's for later.
commit 64f9c118029a9351e9023e96527c120e1d724d5b
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed May 7 17:42:21 2014 +0200
Fix ArrayND freeing the data it doesn't own
Can't really guarantee it works fully correctly now,
but at least this check is needed anyway and compilation
works just fine.
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D508
commit 0618f1c8e88dfc738cdde55784da80b889905e7c
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 7 12:03:32 2014 +0200
Minor changes
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D505
commit 5c34335e1bb90c4ed701ee830c718ed4e20dbffa
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed May 7 11:12:23 2014 +0200
Fix compilation error in frame accessor
- int64 is not a standard type, we've got int64_t defined in
stdint. We also have an MSVC port of this header, so it should
not be an issue.
- Fixed inconsistency in usage of CacheKey and Key, used Key.
- Some functions weren't marked as virtual.
Additional change: added self to authors.
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D504
commit 06bc207614e262cd688e2c3ed820ade7c77bdb66
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 6 22:30:59 2014 +0200
Start new Tracks implementation
This adds the new Tracks implementation, as well as a
trivial test to show it compiles.
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D502
commit 25ce061e6da69881460ba7718bb0d660a2380a02
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 6 19:10:51 2014 +0200
Add Reconstruction class for new API
This starts the new Reconstruction class (with support for e.g. planes). This
also starts the new namespace "mv" which will eventually have all the symbols
we wish to export.
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D501
commit 0a6af3e29016048978aea607673340500e050339
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 6 17:52:53 2014 +0200
Add a new Tracks implementation
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D500
commit 887b68d29c2b198f4939f9ab5153881aa2c1806e
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 6 17:01:39 2014 +0200
Initial commit of unfinished AutoTrack API
This starts creating the new AutoTrack API. The new API will
make it possible for libmv to do full autotracking, including
predictive tracking and also support multiple motion models (3D
planes etc).
The first goal (not in this patch) is to convert Blender to use
the new API without adding any new functionality.
Note: This does not add any of the API to the build system!
It likely does not compile.
Reviewers: sergey
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D499
commit 08cc227d431d257d27f300fbb8e6991e663302da
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Tue May 6 13:09:22 2014 +0200
Fix homography test failure
It was caused by assuming that the reconstructed homography matrix
should look exactly the same as the matrix used to generate a
test case.
It's not actually a valid assumption, because different-looking
matrices could correspond to the exact same transform.
In this change we make it so the actual "re-projected" vectors
are being checked, not the values in the matrix. This makes
the verification more predictable.
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D488
commit 0b7d83dc9627447dc7df64d7e3a468aefe9ddc13
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Apr 23 19:14:55 2014 +0600
Fix compilation on OSX after previous commit
EXPECT_EQ wasn't defined in the scope.
commit d14049e00dabf8fdf49056779f0a3718fbb39e8f
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Apr 23 15:08:16 2014 +0600
Move aligned malloc implementation into own file
It was rather stupid having it in the brute region tracker;
now it is in its own file in the base library (which was also
added in this commit; before this it consisted of header
files only).
Reviewers: keir
Reviewed By: keir
Differential Revision: https://developer.blender.org/D479
commit 0ddf3851bfcb8de43660b119a25a77a25674200d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Apr 21 14:14:03 2014 +0600
Optimization of PearsonProductMomentCorrelation
Pass the arrays by reference rather than by value,
should give some percent of speedup.
Also don't pass the dimensions to the function but
get them from the images themselves.
Hopefully this will give some %% of tracker speedup.
commit f68fdbe5896a6c5bd8b500caeec61b876c5e44c6
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Apr 21 14:10:43 2014 +0600
Fix wrong assert in ResizeImage()
The assert didn't make any sense because ComputeBoundingBox()
is intended to return bounding box in the following way:
(xmin, xmax, ymin, ymax).
commit 1d386b6775a71c499e9b8e4a288c0785c4937677
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Apr 17 18:42:43 2014 +0600
Add unit tests for buffer (un)distortion
Currently only uses identity camera intrinsics just to
see whether lookup grids are properly allocated.
Should prevent accidents like the one that happened recently,
crashing Blender after the Libmv re-integration.

extern/libmv/bundle.sh vendored (187 lines deleted)

@@ -1,187 +0,0 @@
#!/bin/sh
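# Re-bundles the libmv and third_party sources from upstream (git://git.blender.org/libmv.git)
# according to files.txt, refreshes the ChangeLog, and regenerates CMakeLists.txt
# from the template below.
# Usage: ./bundle.sh --i-really-know-what-im-doing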
if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
echo Proceeding as requested by command line ...
else
echo "*** Please run again with --i-really-know-what-im-doing ..."
exit 1
fi
BRANCH="master"
repo="git://git.blender.org/libmv.git"
tmp=`mktemp -d`
git clone -b $BRANCH $repo $tmp/libmv
git --git-dir $tmp/libmv/.git --work-tree $tmp/libmv log -n 50 > ChangeLog
find libmv -type f -exec rm -rf {} \;
find third_party -type f -exec rm -rf {} \;
cat "files.txt" | while read f; do
mkdir -p `dirname $f`
cp $tmp/libmv/src/$f $f
done
rm -rf $tmp
sources=`find ./libmv -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | grep -v _test.cc | grep -v test_data_sets | sed -r 's/^\.\//\t\t/' | sort -d`
headers=`find ./libmv -type f -iname '*.h' | grep -v test_data_sets | sed -r 's/^\.\//\t\t/' | sort -d`
third_sources=`find ./third_party -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t\t/' | sort -d`
third_headers=`find ./third_party -type f -iname '*.h' | sed -r 's/^\.\//\t\t/' | sort -d`
tests=`find ./libmv -type f -iname '*_test.cc' | sort -d | awk ' { name=gensub(".*/([A-Za-z_]+)_test.cc", "\\\\1", $1); printf("\t\tBLENDER_SRC_GTEST(\"libmv_%s\" \"%s\" \"libmv_test_dataset;extern_libmv;extern_ceres\")\n", name, $1) } '`
src_dir=`find ./libmv -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t\t/' | sort -d | uniq`
src_third_dir=`find ./third_party -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t\t/' | sort -d | uniq`
src=""
win_src=""
for x in $src_dir $src_third_dir; do
t=""
if stat $x/*.cpp > /dev/null 2>&1; then
t=" src += env.Glob('`echo $x'/*.cpp'`')"
fi
if stat $x/*.c > /dev/null 2>&1; then
if [ -z "$t" ]; then
t=" src += env.Glob('`echo $x'/*.c'`')"
else
t="$t + env.Glob('`echo $x'/*.c'`')"
fi
fi
if stat $x/*.cc > /dev/null 2>&1; then
if [ -z "$t" ]; then
t=" src += env.Glob('`echo $x'/*.cc'`')"
else
t="$t + env.Glob('`echo $x'/*.cc'`')"
fi
fi
if test `echo $x | grep -c "windows\|gflags" ` -eq 0; then
if [ -z "$src" ]; then
src=$t
else
src=`echo "$src\n$t"`
fi
else
if [ -z "$win_src" ]; then
win_src=`echo " $t"`
else
win_src=`echo "$win_src\n $t"`
fi
fi
done
cat > CMakeLists.txt << EOF
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2011, Blender Foundation
# All rights reserved.
#
# Contributor(s): Blender Foundation,
# Sergey Sharybin
#
# ***** END GPL LICENSE BLOCK *****
# NOTE: This file is automatically generated by the bundle.sh script
# If you're making changes to this file, please update the template
# in that script too
set(INC
.
)
set(INC_SYS
)
set(SRC
libmv-capi.h
)
add_definitions(\${GFLAGS_DEFINES})
add_definitions(\${GLOG_DEFINES})
add_definitions(\${CERES_DEFINES})
if(WITH_LIBMV)
list(APPEND INC
../gflags/src
../glog/src
third_party/ceres/include
third_party/ceres/config
../../intern/guardedalloc
)
list(APPEND INC_SYS
\${EIGEN3_INCLUDE_DIRS}
\${PNG_INCLUDE_DIRS}
\${ZLIB_INCLUDE_DIRS}
)
add_definitions(
-DWITH_LIBMV_GUARDED_ALLOC
-DLIBMV_NO_FAST_DETECTOR=
)
list(APPEND SRC
intern/autotrack.cc
intern/camera_intrinsics.cc
intern/detector.cc
intern/frame_accessor.cc
intern/homography.cc
intern/image.cc
intern/logging.cc
intern/reconstruction.cc
intern/track_region.cc
intern/tracks.cc
intern/tracksN.cc
${sources}
${third_sources}
intern/autotrack.h
intern/camera_intrinsics.h
intern/detector.h
intern/frame_accessor.h
intern/homography.h
intern/image.h
intern/logging.h
intern/reconstruction.h
intern/track_region.h
intern/tracks.h
intern/tracksN.h
${headers}
${third_headers}
)
if(WITH_GTESTS)
blender_add_lib(libmv_test_dataset "./libmv/multiview/test_data_sets.cc" "${INC}" "${INC_SYS}")
${tests}
endif()
else()
list(APPEND SRC
intern/stub.cc
)
endif()
blender_add_lib(extern_libmv "\${SRC}" "\${INC}" "\${INC_SYS}")
EOF

extern/libmv/files.txt vendored (138 lines deleted)

@@ -1,138 +0,0 @@
libmv/autotrack/autotrack.cc
libmv/autotrack/autotrack.h
libmv/autotrack/callbacks.h
libmv/autotrack/frame_accessor.h
libmv/autotrack/marker.h
libmv/autotrack/model.h
libmv/autotrack/predict_tracks.cc
libmv/autotrack/predict_tracks.h
libmv/autotrack/predict_tracks_test.cc
libmv/autotrack/quad.h
libmv/autotrack/reconstruction.h
libmv/autotrack/region.h
libmv/autotrack/tracks.cc
libmv/autotrack/tracks.h
libmv/autotrack/tracks_test.cc
libmv/base/aligned_malloc.cc
libmv/base/aligned_malloc.h
libmv/base/id_generator.h
libmv/base/scoped_ptr.h
libmv/base/scoped_ptr_test.cc
libmv/base/vector.h
libmv/base/vector_test.cc
libmv/base/vector_utils.h
libmv/image/array_nd.cc
libmv/image/array_nd.h
libmv/image/array_nd_test.cc
libmv/image/convolve.cc
libmv/image/convolve.h
libmv/image/convolve_test.cc
libmv/image/correlation.h
libmv/image/image_converter.h
libmv/image/image_drawing.h
libmv/image/image.h
libmv/image/image_test.cc
libmv/image/sample.h
libmv/image/sample_test.cc
libmv/image/tuple.h
libmv/image/tuple_test.cc
libmv/logging/logging.h
libmv/multiview/conditioning.cc
libmv/multiview/conditioning.h
libmv/multiview/euclidean_resection.cc
libmv/multiview/euclidean_resection.h
libmv/multiview/euclidean_resection_test.cc
libmv/multiview/fundamental.cc
libmv/multiview/fundamental.h
libmv/multiview/fundamental_test.cc
libmv/multiview/homography.cc
libmv/multiview/homography_error.h
libmv/multiview/homography.h
libmv/multiview/homography_parameterization.h
libmv/multiview/homography_test.cc
libmv/multiview/nviewtriangulation.h
libmv/multiview/nviewtriangulation_test.cc
libmv/multiview/panography.cc
libmv/multiview/panography.h
libmv/multiview/panography_kernel.cc
libmv/multiview/panography_kernel.h
libmv/multiview/panography_test.cc
libmv/multiview/projection.cc
libmv/multiview/projection.h
libmv/multiview/projection_test.cc
libmv/multiview/resection.h
libmv/multiview/resection_test.cc
libmv/multiview/test_data_sets.cc
libmv/multiview/test_data_sets.h
libmv/multiview/triangulation.cc
libmv/multiview/triangulation.h
libmv/multiview/triangulation_test.cc
libmv/multiview/two_view_kernel.h
libmv/numeric/dogleg.h
libmv/numeric/dogleg_test.cc
libmv/numeric/function_derivative.h
libmv/numeric/function_derivative_test.cc
libmv/numeric/levenberg_marquardt.h
libmv/numeric/levenberg_marquardt_test.cc
libmv/numeric/numeric.cc
libmv/numeric/numeric.h
libmv/numeric/numeric_test.cc
libmv/numeric/poly.cc
libmv/numeric/poly.h
libmv/numeric/poly_test.cc
libmv/simple_pipeline/bundle.cc
libmv/simple_pipeline/bundle.h
libmv/simple_pipeline/callbacks.h
libmv/simple_pipeline/camera_intrinsics.cc
libmv/simple_pipeline/camera_intrinsics.h
libmv/simple_pipeline/camera_intrinsics_impl.h
libmv/simple_pipeline/camera_intrinsics_test.cc
libmv/simple_pipeline/detect.cc
libmv/simple_pipeline/detect.h
libmv/simple_pipeline/detect_test.cc
libmv/simple_pipeline/distortion_models.cc
libmv/simple_pipeline/distortion_models.h
libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.cc
libmv/simple_pipeline/intersect.h
libmv/simple_pipeline/intersect_test.cc
libmv/simple_pipeline/keyframe_selection.cc
libmv/simple_pipeline/keyframe_selection.h
libmv/simple_pipeline/keyframe_selection_test.cc
libmv/simple_pipeline/modal_solver.cc
libmv/simple_pipeline/modal_solver.h
libmv/simple_pipeline/modal_solver_test.cc
libmv/simple_pipeline/pipeline.cc
libmv/simple_pipeline/pipeline.h
libmv/simple_pipeline/reconstruction.cc
libmv/simple_pipeline/reconstruction.h
libmv/simple_pipeline/reconstruction_scale.cc
libmv/simple_pipeline/reconstruction_scale.h
libmv/simple_pipeline/resect.cc
libmv/simple_pipeline/resect.h
libmv/simple_pipeline/resect_test.cc
libmv/simple_pipeline/tracks.cc
libmv/simple_pipeline/tracks.h
libmv/tracking/brute_region_tracker.cc
libmv/tracking/brute_region_tracker.h
libmv/tracking/brute_region_tracker_test.cc
libmv/tracking/hybrid_region_tracker.cc
libmv/tracking/hybrid_region_tracker.h
libmv/tracking/kalman_filter.h
libmv/tracking/klt_region_tracker.cc
libmv/tracking/klt_region_tracker.h
libmv/tracking/klt_region_tracker_test.cc
libmv/tracking/pyramid_region_tracker.cc
libmv/tracking/pyramid_region_tracker.h
libmv/tracking/pyramid_region_tracker_test.cc
libmv/tracking/region_tracker.h
libmv/tracking/retrack_region_tracker.cc
libmv/tracking/retrack_region_tracker.h
libmv/tracking/track_region.cc
libmv/tracking/track_region.h
libmv/tracking/trklt_region_tracker.cc
libmv/tracking/trklt_region_tracker.h
third_party/msinttypes/inttypes.h
third_party/msinttypes/README.libmv
third_party/msinttypes/stdint.h

extern/libmv/intern/autotrack.cc

@@ -1,99 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2014 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/autotrack.h"
#include "intern/tracksN.h"
#include "intern/utildefines.h"
#include "libmv/autotrack/autotrack.h"
using mv::AutoTrack;
using mv::FrameAccessor;
using mv::Marker;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor) {
return (libmv_AutoTrack*) LIBMV_OBJECT_NEW(AutoTrack,
(FrameAccessor*) frame_accessor);
}
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
LIBMV_OBJECT_DELETE(libmv_autotrack, AutoTrack);
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
const libmv_AutoTrackOptions* options) {
AutoTrack *autotrack = ((AutoTrack*) libmv_autotrack);
libmv_configureTrackRegionOptions(options->track_region,
&autotrack->options.track_region);
autotrack->options.search_region.min(0) = options->search_region.min[0];
autotrack->options.search_region.min(1) = options->search_region.min[1];
autotrack->options.search_region.max(0) = options->search_region.max[0];
autotrack->options.search_region.max(1) = options->search_region.max[1];
}
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker *libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result) {
Marker tracked_marker;
TrackRegionOptions options;
TrackRegionResult result;
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
libmv_configureTrackRegionOptions(*libmv_options,
&options);
(((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
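/* A non-zero return value means libmv considers the tracking result usable. */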
return result.is_usable();
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((AutoTrack*) libmv_autotrack)->AddMarker(marker);
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker *libmv_marker) {
Marker marker;
int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
frame,
track,
&marker);
if (ok) {
libmv_markerToApiMarker(marker, libmv_marker);
}
return ok;
}

extern/libmv/intern/autotrack.h

@@ -1,71 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2014 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_AUTOTRACK_H_
#define LIBMV_C_API_AUTOTRACK_H_
#include "intern/frame_accessor.h"
#include "intern/tracksN.h"
#include "intern/track_region.h"
#include "intern/region.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_AutoTrack libmv_AutoTrack;
typedef struct libmv_AutoTrackOptions {
libmv_TrackRegionOptions track_region;
libmv_Region search_region;
} libmv_AutoTrackOptions;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor);
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack);
void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
const libmv_AutoTrackOptions* options);
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker *libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result);
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_Marker* libmv_marker);
int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker *libmv_marker);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_AUTOTRACK_H_
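
For orientation, below is a minimal usage sketch of the C API declared above. It is not part of this commit: create_frame_accessor() is a hypothetical placeholder for code built on intern/frame_accessor.h, the option values are arbitrary, and the libmv_Marker/libmv_TrackRegionOptions fields (declared in tracksN.h and track_region.h) are left zero-initialized for brevity.

#include "intern/autotrack.h"

/* Hypothetical helper: builds a frame accessor via the intern/frame_accessor.h API. */
extern libmv_FrameAccessor *create_frame_accessor(void);

static void example_autotrack(void) {
  libmv_FrameAccessor *accessor = create_frame_accessor();
  libmv_AutoTrack *autotrack = libmv_autoTrackNew(accessor);

  /* Configure options; real code fills track_region as well. */
  libmv_AutoTrackOptions options = {0};
  options.search_region.min[0] = 0.0;   /* xmin */
  options.search_region.min[1] = 0.0;   /* ymin */
  options.search_region.max[0] = 64.0;  /* xmax */
  options.search_region.max[1] = 64.0;  /* ymax */
  libmv_autoTrackSetOptions(autotrack, &options);

  /* Markers are stored and queried by (clip, frame, track). */
  libmv_Marker marker;
  if (libmv_autoTrackGetMarker(autotrack, /*clip*/ 0, /*frame*/ 1, /*track*/ 0, &marker)) {
    /* Use the marker, e.g. feed it to libmv_autoTrackMarker() for the next frame. */
  }

  libmv_autoTrackDestroy(autotrack);
}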

extern/libmv/intern/camera_intrinsics.cc

@@ -1,355 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/camera_intrinsics.h"
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
CameraIntrinsics *camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
return (libmv_CameraIntrinsics *) camera_intrinsics;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics) {
const CameraIntrinsics *orig_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics *new_intrinsics = NULL;
switch (orig_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
*polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
*division_intrinsics);
break;
}
default:
assert(!"Unknown distortion model");
}
return (libmv_CameraIntrinsics *) new_intrinsics;
}
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
LIBMV_OBJECT_DELETE(libmv_intrinsics, CameraIntrinsics);
}
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_CameraIntrinsics* libmv_intrinsics) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
int image_width = libmv_camera_intrinsics_options->image_width;
int image_height = libmv_camera_intrinsics_options->image_height;
/* Try to avoid unnecessary updates, so that pre-computed distortion grids
* are not freed.
*/
if (camera_intrinsics->focal_length() != focal_length) {
camera_intrinsics->SetFocalLength(focal_length, focal_length);
}
if (camera_intrinsics->principal_point_x() != principal_x ||
camera_intrinsics->principal_point_y() != principal_y) {
camera_intrinsics->SetPrincipalPoint(principal_x, principal_y);
}
if (camera_intrinsics->image_width() != image_width ||
camera_intrinsics->image_height() != image_height) {
camera_intrinsics->SetImageSize(image_width, image_height);
}
switch (libmv_camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
PolynomialCameraIntrinsics *polynomial_intrinsics =
(PolynomialCameraIntrinsics *) camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->polynomial_k1;
double k2 = libmv_camera_intrinsics_options->polynomial_k2;
double k3 = libmv_camera_intrinsics_options->polynomial_k3;
if (polynomial_intrinsics->k1() != k1 ||
polynomial_intrinsics->k2() != k2 ||
polynomial_intrinsics->k3() != k3) {
polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
DivisionCameraIntrinsics *division_intrinsics =
(DivisionCameraIntrinsics *) camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
if (division_intrinsics->k1() != k1 ||
division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
}
break;
}
default:
assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
int threads) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* libmv_intrinsics,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
const CameraIntrinsics *camera_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
// Fill in options which are common for all distortion models.
camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
camera_intrinsics_options->principal_point_x =
camera_intrinsics->principal_point_x();
camera_intrinsics_options->principal_point_y =
camera_intrinsics->principal_point_y();
camera_intrinsics_options->image_width = camera_intrinsics->image_width();
camera_intrinsics_options->image_height = camera_intrinsics->image_height();
switch (camera_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_POLYNOMIAL;
camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_DIVISION;
camera_intrinsics_options->division_k1 = division_intrinsics->k1();
camera_intrinsics_options->division_k2 = division_intrinsics->k2();
break;
}
default:
assert(!"Uknown distortion model");
}
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image) {
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsUndistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
const float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char *destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsApply(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1) {
/* Only apply lens distortion if the focal length is non-zero. */
if (libmv_camera_intrinsics_options->focal_length) {
CameraIntrinsics* camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
camera_intrinsics->ApplyIntrinsics(x, y, x1, y1);
LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
}
}
void libmv_cameraIntrinsicsInvert(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1) {
/* Only apply lens un-distortion if the focal length is non-zero. */
if (libmv_camera_intrinsics_options->focal_length) {
CameraIntrinsics *camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
camera_intrinsics->InvertIntrinsics(x, y, x1, y1);
LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
}
}
static void libmv_cameraIntrinsicsFillFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options,
CameraIntrinsics* camera_intrinsics) {
camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
camera_intrinsics_options->focal_length);
camera_intrinsics->SetPrincipalPoint(
camera_intrinsics_options->principal_point_x,
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
camera_intrinsics_options->image_height);
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
polynomial_intrinsics->SetRadialDistortion(
camera_intrinsics_options->polynomial_k1,
camera_intrinsics_options->polynomial_k2,
camera_intrinsics_options->polynomial_k3);
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
DivisionCameraIntrinsics *division_intrinsics =
static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
division_intrinsics->SetDistortion(
camera_intrinsics_options->division_k1,
camera_intrinsics_options->division_k2);
break;
}
default:
assert(!"Unknown distortion model");
}
}
CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
CameraIntrinsics *camera_intrinsics = NULL;
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
break;
case LIBMV_DISTORTION_MODEL_DIVISION:
camera_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics);
break;
default:
assert(!"Unknown distortion model");
}
libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
camera_intrinsics);
return camera_intrinsics;
}

extern/libmv/intern/camera_intrinsics.h

@@ -1,138 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_CAMERA_INTRINSICS_H_
#define LIBMV_C_API_CAMERA_INTRINSICS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_CameraIntrinsics libmv_CameraIntrinsics;
enum {
LIBMV_DISTORTION_MODEL_POLYNOMIAL = 0,
LIBMV_DISTORTION_MODEL_DIVISION = 1,
};
typedef struct libmv_CameraIntrinsicsOptions {
// Common settings of all distortion models.
int distortion_model;
int image_width, image_height;
double focal_length;
double principal_point_x, principal_point_y;
// Radial distortion model.
double polynomial_k1, polynomial_k2, polynomial_k3;
double polynomial_p1, polynomial_p2;
// Division distortion model.
double division_k1, division_k2;
} libmv_CameraIntrinsicsOptions;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options);
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
int threads);
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* libmv_intrinsics,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options);
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image);
void libmv_cameraIntrinsicsUndistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
const float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image);
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char *destination_image);
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image);
void libmv_cameraIntrinsicsApply(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1);
void libmv_cameraIntrinsicsInvert(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
namespace libmv {
class CameraIntrinsics;
}
libmv::CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options);
#endif
#endif // LIBMV_C_API_CAMERA_INTRINSICS_H_
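
To illustrate the options struct above, here is a minimal sketch (not part of this commit) that fills a polynomial-model libmv_CameraIntrinsicsOptions and round-trips a point through libmv_cameraIntrinsicsApply()/libmv_cameraIntrinsicsInvert(); all numeric values are made up for the example, and the coordinate conventions are those of libmv's ApplyIntrinsics()/InvertIntrinsics() used in the .cc above.

#include <stdio.h>
#include "intern/camera_intrinsics.h"

static void example_intrinsics(void) {
  libmv_CameraIntrinsicsOptions options = {0};
  options.distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
  options.image_width = 1920;
  options.image_height = 1080;
  options.focal_length = 1689.0;  /* in pixels */
  options.principal_point_x = 960.0;
  options.principal_point_y = 540.0;
  options.polynomial_k1 = -0.05;  /* k2, k3, p1, p2 stay zero */

  /* Map a point through the intrinsics and back; both calls are no-ops
   * when focal_length is zero (see libmv_cameraIntrinsicsApply above). */
  double xd, yd, xu, yu;
  libmv_cameraIntrinsicsApply(&options, 0.1, 0.2, &xd, &yd);
  libmv_cameraIntrinsicsInvert(&options, xd, yd, &xu, &yu);
  printf("applied: (%f, %f), inverted back: (%f, %f)\n", xd, yd, xu, yu);
}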

extern/libmv/intern/detector.cc

@@ -1,148 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/detector.h"
#include "intern/image.h"
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/detect.h"
using libmv::Detect;
using libmv::DetectOptions;
using libmv::Feature;
using libmv::FloatImage;
struct libmv_Features {
int count;
Feature* features;
};
namespace {
libmv_Features *libmv_featuresFromVector(
const libmv::vector<Feature>& features) {
libmv_Features* libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
if (count) {
libmv_features->features = LIBMV_STRUCT_NEW(Feature, count);
for (int i = 0; i < count; i++) {
libmv_features->features[i] = features.at(i);
}
} else {
libmv_features->features = NULL;
}
libmv_features->count = count;
return libmv_features;
}
void libmv_convertDetectorOptions(libmv_DetectOptions *options,
DetectOptions *detector_options) {
switch (options->detector) {
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_ ## the_detector: \
detector_options->type = DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
LIBMV_CONVERT(HARRIS)
#undef LIBMV_CONVERT
}
detector_options->margin = options->margin;
detector_options->min_distance = options->min_distance;
detector_options->fast_min_trackness = options->fast_min_trackness;
detector_options->moravec_max_count = options->moravec_max_count;
detector_options->moravec_pattern = options->moravec_pattern;
detector_options->harris_threshold = options->harris_threshold;
}
} // namespace
libmv_Features *libmv_detectFeaturesByte(const unsigned char* image_buffer,
int width,
int height,
int channels,
libmv_DetectOptions* options) {
// Prepare the image.
FloatImage image;
libmv_byteBufferToFloatImage(image_buffer, width, height, channels, &image);
// Configure detector.
DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
libmv::vector<Feature> detected_features;
Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features* result = libmv_featuresFromVector(detected_features);
return result;
}
libmv_Features* libmv_detectFeaturesFloat(const float* image_buffer,
int width,
int height,
int channels,
libmv_DetectOptions* options) {
// Prepare the image.
FloatImage image;
libmv_floatBufferToFloatImage(image_buffer, width, height, channels, &image);
// Configure detector.
DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
libmv::vector<Feature> detected_features;
Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features* result = libmv_featuresFromVector(detected_features);
return result;
}
void libmv_featuresDestroy(libmv_Features* libmv_features) {
if (libmv_features->features) {
LIBMV_STRUCT_DELETE(libmv_features->features);
}
LIBMV_STRUCT_DELETE(libmv_features);
}
int libmv_countFeatures(const libmv_Features* libmv_features) {
return libmv_features->count;
}
void libmv_getFeature(const libmv_Features* libmv_features,
int number,
double* x,
double* y,
double* score,
double* size) {
Feature &feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
*score = feature.score;
*size = feature.size;
}

View File

@@ -1,77 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_DETECTOR_H_
#define LIBMV_C_API_DETECTOR_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_Features libmv_Features;
enum {
LIBMV_DETECTOR_FAST,
LIBMV_DETECTOR_MORAVEC,
LIBMV_DETECTOR_HARRIS,
};
typedef struct libmv_DetectOptions {
int detector;
int margin;
int min_distance;
int fast_min_trackness;
int moravec_max_count;
unsigned char *moravec_pattern;
double harris_threshold;
} libmv_DetectOptions;
libmv_Features* libmv_detectFeaturesByte(const unsigned char* image_buffer,
int width,
int height,
int channels,
libmv_DetectOptions* options);
libmv_Features* libmv_detectFeaturesFloat(const float* image_buffer,
int width,
int height,
int channels,
libmv_DetectOptions* options);
void libmv_featuresDestroy(libmv_Features* libmv_features);
int libmv_countFeatures(const libmv_Features* libmv_features);
void libmv_getFeature(const libmv_Features* libmv_features,
int number,
double* x,
double* y,
double* score,
double* size);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_DETECTOR_H_
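
A minimal usage sketch of the detector C-API declared above; the detector settings and the print loop are illustrative and not taken from Blender's tracking code.

#include <stdio.h>
#include "intern/detector.h"

/* Hypothetical caller: run the Harris detector on a single-channel byte
 * image and print every detected feature. */
static void example_detect(const unsigned char *pixels, int width, int height) {
  libmv_DetectOptions options = {0};
  options.detector = LIBMV_DETECTOR_HARRIS;
  options.margin = 16;
  options.min_distance = 120;
  options.harris_threshold = 1e-5;

  libmv_Features *features =
      libmv_detectFeaturesByte(pixels, width, height, 1, &options);
  for (int i = 0; i < libmv_countFeatures(features); i++) {
    double x, y, score, size;
    libmv_getFeature(features, i, &x, &y, &score, &size);
    printf("feature %d: (%.2f, %.2f) score %.4f size %.1f\n",
           i, x, y, score, size);
  }
  libmv_featuresDestroy(features);
}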

View File

@@ -1,164 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2014 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/frame_accessor.h"
#include "intern/image.h"
#include "intern/utildefines.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/region.h"
#include "libmv/image/image.h"
namespace {
using libmv::FloatImage;
using mv::FrameAccessor;
using mv::Region;
struct LibmvFrameAccessor : public FrameAccessor {
LibmvFrameAccessor(libmv_FrameAccessorUserData* user_data,
libmv_GetImageCallback get_image_callback,
libmv_ReleaseImageCallback release_image_callback)
: user_data_(user_data),
get_image_callback_(get_image_callback),
release_image_callback_(release_image_callback) { }
libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
switch (input_mode) {
#define CHECK_INPUT_MODE(mode) \
case mode: \
return LIBMV_IMAGE_MODE_ ## mode;
CHECK_INPUT_MODE(MONO)
CHECK_INPUT_MODE(RGBA)
#undef CHECK_INPUT_MODE
}
assert(!"unknown input mode passed from Libmv.");
// TODO(sergey): Proper error handling here in the future.
return LIBMV_IMAGE_MODE_MONO;
}
void get_libmv_region(const Region& region,
libmv_Region* libmv_region) {
libmv_region->min[0] = region.min(0);
libmv_region->min[1] = region.min(1);
libmv_region->max[0] = region.max(0);
libmv_region->max[1] = region.max(1);
}
Key GetImage(int clip,
int frame,
InputMode input_mode,
int downscale,
const Region* region,
const Transform* transform,
FloatImage* destination) {
float *float_buffer;
int width, height, channels;
libmv_Region libmv_region;
if (region) {
get_libmv_region(*region, &libmv_region);
}
Key cache_key = get_image_callback_(user_data_,
clip,
frame,
get_libmv_input_mode(input_mode),
downscale,
region != NULL ? &libmv_region : NULL,
(libmv_FrameTransform*) transform,
&float_buffer,
&width,
&height,
&channels);
// TODO(sergey): Dumb code for until we can set data directly.
FloatImage temp_image(float_buffer,
height,
width,
channels);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseImage(Key cache_key) {
release_image_callback_(cache_key);
}
bool GetClipDimensions(int clip, int *width, int *height) {
return false;
}
int NumClips() {
return 1;
}
int NumFrames(int clip) {
return 0;
}
libmv_FrameAccessorUserData* user_data_;
libmv_GetImageCallback get_image_callback_;
libmv_ReleaseImageCallback release_image_callback_;
};
} // namespace
libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_FrameAccessorUserData* user_data,
libmv_GetImageCallback get_image_callback,
libmv_ReleaseImageCallback release_image_callback) {
return (libmv_FrameAccessor*) LIBMV_OBJECT_NEW(LibmvFrameAccessor,
user_data,
get_image_callback,
release_image_callback);
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
}
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
return ((FrameAccessor::Transform*) transform)->key();
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image) {
const FloatImage input(input_image->buffer,
input_image->height,
input_image->width,
input_image->channels);
FloatImage output;
((FrameAccessor::Transform*) transform)->run(input,
&output);
  int num_pixels = output.Width() * output.Height() * output.Depth();
output_image->buffer = new float[num_pixels];
memcpy(output_image->buffer, output.Data(), num_pixels * sizeof(float));
output_image->width = output.Width();
output_image->height = output.Height();
output_image->channels = output.Depth();
}

View File

@@ -1,79 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2014 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_FRAME_ACCESSOR_H_
#define LIBMV_C_API_FRAME_ACCESSOR_H_
#include <stdint.h>
#include "intern/image.h"
#include "intern/region.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_FrameAccessor libmv_FrameAccessor;
typedef struct libmv_FrameTransform libmv_FrameTransform;
typedef struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData;
typedef void *libmv_CacheKey;
typedef enum {
LIBMV_IMAGE_MODE_MONO,
LIBMV_IMAGE_MODE_RGBA,
} libmv_InputMode;
typedef libmv_CacheKey (*libmv_GetImageCallback) (
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
libmv_InputMode input_mode,
int downscale,
const libmv_Region* region,
const libmv_FrameTransform* transform,
float** destination,
int* width,
int* height,
int* channels);
typedef void (*libmv_ReleaseImageCallback) (libmv_CacheKey cache_key);
libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_FrameAccessorUserData* user_data,
libmv_GetImageCallback get_image_callback,
libmv_ReleaseImageCallback release_image_callback);
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_FRAME_ACCESSOR_H_
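
A sketch of what the two callbacks above could look like on the host side. fetch_frame_pixels() is a hypothetical stand-in for whatever image cache the host application provides; nothing below is part of the moved sources.

#include <stdlib.h>
#include "intern/frame_accessor.h"

/* Hypothetical host-side helper which returns a malloc'ed float buffer. */
extern float *fetch_frame_pixels(void *host_data, int clip, int frame,
                                 int *width, int *height, int *channels);

static libmv_CacheKey example_get_image(libmv_FrameAccessorUserData *user_data,
                                        int clip, int frame,
                                        libmv_InputMode input_mode,
                                        int downscale,
                                        const libmv_Region *region,
                                        const libmv_FrameTransform *transform,
                                        float **destination,
                                        int *width, int *height, int *channels) {
  (void)input_mode; (void)downscale; (void)region; (void)transform;
  *destination = fetch_frame_pixels((void *)user_data, clip, frame,
                                    width, height, channels);
  /* The buffer itself doubles as the cache key handed back for release. */
  return (libmv_CacheKey)*destination;
}

static void example_release_image(libmv_CacheKey cache_key) {
  free(cache_key);
}

/* The accessor would then be created with:
 *   libmv_FrameAccessorNew(user_data, example_get_image, example_release_image); */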

View File

@@ -1,59 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/homography.h"
#include "intern/utildefines.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/homography.h"
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
/* const */ double (*x2)[2],
int num_points,
double H[3][3]) {
libmv::Mat x1_mat, x2_mat;
libmv::Mat3 H_mat;
x1_mat.resize(2, num_points);
x2_mat.resize(2, num_points);
for (int i = 0; i < num_points; i++) {
x1_mat.col(i) = libmv::Vec2(x1[i][0], x1[i][1]);
x2_mat.col(i) = libmv::Vec2(x2[i][0], x2[i][1]);
}
LG << "x1: " << x1_mat;
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
libmv::EstimateHomography2DFromCorrespondences(x1_mat,
x2_mat,
options,
&H_mat);
LG << "H: " << H_mat;
memcpy(H, H_mat.data(), 9 * sizeof(double));
}

View File

@@ -1,43 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_HOMOGRAPHY_H_
#define LIBMV_C_API_HOMOGRAPHY_H_
#ifdef __cplusplus
extern "C" {
#endif
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
/* const */ double (*x2)[2],
int num_points,
double H[3][3]);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_HOMOGRAPHY_H_
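
A minimal usage sketch of the declaration above; the correspondences are made up so the expected result is obvious.

#include "intern/homography.h"

/* Hypothetical caller: estimate the homography mapping the unit square onto
 * a copy translated by (2, 3). */
static void example_homography(void) {
  double x1[4][2] = {{0.0, 0.0}, {1.0, 0.0}, {1.0, 1.0}, {0.0, 1.0}};
  double x2[4][2] = {{2.0, 3.0}, {3.0, 3.0}, {3.0, 4.0}, {2.0, 4.0}};
  double H[3][3];
  libmv_homography2DFromCorrespondencesEuc(x1, x2, 4, H);
  /* H now holds the estimated 3x3 homography; for this input it is a pure
   * translation by (2, 3) up to numerical noise. */
}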

View File

@@ -1,272 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/image.h"
#include "intern/utildefines.h"
#include "libmv/tracking/track_region.h"
#include <cassert>
#include <png.h>
using libmv::FloatImage;
using libmv::SamplePlanarPatch;
void libmv_floatImageDestroy(libmv_FloatImage *image) {
delete [] image->buffer;
}
/* Image <-> buffers conversion */
void libmv_byteBufferToFloatImage(const unsigned char* buffer,
int width,
int height,
int channels,
FloatImage* image) {
image->Resize(height, width, channels);
for (int y = 0, a = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int k = 0; k < channels; k++) {
(*image)(y, x, k) = (float)buffer[a++] / 255.0f;
}
}
}
}
void libmv_floatBufferToFloatImage(const float* buffer,
int width,
int height,
int channels,
FloatImage* image) {
image->Resize(height, width, channels);
for (int y = 0, a = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int k = 0; k < channels; k++) {
(*image)(y, x, k) = buffer[a++];
}
}
}
}
void libmv_floatImageToFloatBuffer(const FloatImage &image,
float* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
buffer[a++] = image(y, x, k);
}
}
}
}
void libmv_floatImageToByteBuffer(const libmv::FloatImage &image,
unsigned char* buffer) {
  for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
buffer[a++] = image(y, x, k) * 255.0f;
}
}
}
}
static bool savePNGImage(png_bytep* row_pointers,
int width,
int height,
int depth,
int color_type,
const char* file_name) {
png_infop info_ptr;
png_structp png_ptr;
FILE *fp = fopen(file_name, "wb");
if (fp == NULL) {
return false;
}
/* Initialize stuff */
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
info_ptr = png_create_info_struct(png_ptr);
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return false;
}
png_init_io(png_ptr, fp);
/* Write PNG header. */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return false;
}
png_set_IHDR(png_ptr,
info_ptr,
width,
height,
depth,
color_type,
PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE,
PNG_FILTER_TYPE_BASE);
png_write_info(png_ptr, info_ptr);
  /* Write bytes. */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return false;
}
png_write_image(png_ptr, row_pointers);
  /* End write. */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return false;
}
png_write_end(png_ptr, NULL);
fclose(fp);
return true;
}
bool libmv_saveImage(const FloatImage& image,
const char* prefix,
int x0,
int y0) {
int x, y;
png_bytep *row_pointers;
assert(image.Depth() == 1);
row_pointers = new png_bytep[image.Height()];
for (y = 0; y < image.Height(); y++) {
row_pointers[y] = new png_byte[4 * image.Width()];
for (x = 0; x < image.Width(); x++) {
if (x0 == x && image.Height() - y0 - 1 == y) {
row_pointers[y][x * 4 + 0] = 255;
row_pointers[y][x * 4 + 1] = 0;
row_pointers[y][x * 4 + 2] = 0;
row_pointers[y][x * 4 + 3] = 255;
} else {
float pixel = image(image.Height() - y - 1, x, 0);
row_pointers[y][x * 4 + 0] = pixel * 255;
row_pointers[y][x * 4 + 1] = pixel * 255;
row_pointers[y][x * 4 + 2] = pixel * 255;
row_pointers[y][x * 4 + 3] = 255;
}
}
}
static int image_counter = 0;
char file_name[128];
snprintf(file_name, sizeof(file_name),
"%s_%02d.png",
prefix, ++image_counter);
bool result = savePNGImage(row_pointers,
image.Width(),
image.Height(),
8,
PNG_COLOR_TYPE_RGBA,
file_name);
for (y = 0; y < image.Height(); y++) {
delete [] row_pointers[y];
}
delete [] row_pointers;
return result;
}
void libmv_samplePlanarPatchFloat(const float* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
float* patch,
double* warped_position_x,
double* warped_position_y) {
FloatImage libmv_image, libmv_patch, libmv_mask;
FloatImage *libmv_mask_for_sample = NULL;
libmv_floatBufferToFloatImage(image, width, height, channels, &libmv_image);
if (mask) {
libmv_floatBufferToFloatImage(mask, width, height, 1, &libmv_mask);
libmv_mask_for_sample = &libmv_mask;
}
SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
warped_position_y);
libmv_floatImageToFloatBuffer(libmv_patch, patch);
}
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y) {
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
libmv::FloatImage *libmv_mask_for_sample = NULL;
libmv_byteBufferToFloatImage(image, width, height, channels, &libmv_image);
if (mask) {
libmv_floatBufferToFloatImage(mask, width, height, 1, &libmv_mask);
libmv_mask_for_sample = &libmv_mask;
}
libmv::SamplePlanarPatch(libmv_image,
xs, ys,
num_samples_x, num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
warped_position_y);
libmv_floatImageToByteBuffer(libmv_patch, patch);
}

View File

@@ -1,99 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_IMAGE_H_
#define LIBMV_IMAGE_H_
#ifdef __cplusplus
# include "libmv/image/image.h"
void libmv_byteBufferToFloatImage(const unsigned char* buffer,
int width,
int height,
int channels,
libmv::FloatImage* image);
void libmv_floatBufferToFloatImage(const float* buffer,
int width,
int height,
int channels,
libmv::FloatImage* image);
void libmv_floatImageToFloatBuffer(const libmv::FloatImage& image,
float *buffer);
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer);
bool libmv_saveImage(const libmv::FloatImage& image,
const char* prefix,
int x0,
int y0);
#endif // __cplusplus
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_FloatImage {
float *buffer;
int width;
int height;
int channels;
} libmv_FloatImage;
void libmv_floatImageDestroy(libmv_FloatImage *image);
void libmv_samplePlanarPatchFloat(const float* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
float* patch,
double* warped_position_x,
double* warped_position_y);
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_IMAGE_H_

View File

@@ -1,55 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/logging.h"
#include "intern/utildefines.h"
#include "libmv/logging/logging.h"
void libmv_initLogging(const char* argv0) {
  // Make it so FATAL messages are always printed to the console.
char severity_fatal[32];
snprintf(severity_fatal, sizeof(severity_fatal), "%d",
google::GLOG_FATAL);
google::InitGoogleLogging(argv0);
gflags::SetCommandLineOption("logtostderr", "1");
gflags::SetCommandLineOption("v", "0");
gflags::SetCommandLineOption("stderrthreshold", severity_fatal);
gflags::SetCommandLineOption("minloglevel", severity_fatal);
}
void libmv_startDebugLogging(void) {
gflags::SetCommandLineOption("logtostderr", "1");
gflags::SetCommandLineOption("v", "2");
gflags::SetCommandLineOption("stderrthreshold", "1");
gflags::SetCommandLineOption("minloglevel", "0");
}
void libmv_setLoggingVerbosity(int verbosity) {
char val[10];
snprintf(val, sizeof(val), "%d", verbosity);
gflags::SetCommandLineOption("v", val);
}

View File

@@ -1,47 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_LOGGING_H_
#define LIBMV_C_API_LOGGING_H_
#ifdef __cplusplus
extern "C" {
#endif
// Initialize GLog logging.
void libmv_initLogging(const char* argv0);
// Switch GLog to the debug logging level.
void libmv_startDebugLogging(void);
// Set GLog logging verbosity level.
void libmv_setLoggingVerbosity(int verbosity);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_LOGGING_H_
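
A sketch of the expected call order at application startup, assuming GLog has not been initialized elsewhere; the verbosity value is illustrative.

#include "intern/logging.h"

/* Hypothetical startup: initialize logging once, then raise verbosity
 * while debugging a solve. */
int main(int argc, char **argv) {
  (void)argc;
  libmv_initLogging(argv[0]);
  libmv_startDebugLogging();
  libmv_setLoggingVerbosity(2);
  return 0;
}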

View File

@@ -1,542 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/reconstruction.h"
#include "intern/camera_intrinsics.h"
#include "intern/tracks.h"
#include "intern/utildefines.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
#include "libmv/simple_pipeline/tracks.h"
using libmv::CameraIntrinsics;
using libmv::EuclideanCamera;
using libmv::EuclideanPoint;
using libmv::EuclideanReconstruction;
using libmv::EuclideanScaleToUnity;
using libmv::Marker;
using libmv::ProgressUpdateCallback;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
using libmv::EuclideanBundle;
using libmv::EuclideanCompleteReconstruction;
using libmv::EuclideanReconstructTwoFrames;
using libmv::EuclideanReprojectionError;
struct libmv_Reconstruction {
EuclideanReconstruction reconstruction;
/* Used for per-track average error calculation after reconstruction */
Tracks tracks;
CameraIntrinsics *intrinsics;
double error;
bool is_valid;
};
namespace {
class ReconstructUpdateCallback : public ProgressUpdateCallback {
public:
ReconstructUpdateCallback(
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
}
void invoke(double progress, const char* message) {
if (progress_update_callback_) {
progress_update_callback_(callback_customdata_, progress, message);
}
}
protected:
reconstruct_progress_update_cb progress_update_callback_;
void* callback_customdata_;
};
void libmv_solveRefineIntrinsics(
const Tracks &tracks,
const int refine_intrinsics,
const int bundle_constraints,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata,
EuclideanReconstruction* reconstruction,
CameraIntrinsics* intrinsics) {
  /* Only a few combinations are supported, but trust the caller. */
int bundle_intrinsics = 0;
if (refine_intrinsics & LIBMV_REFINE_FOCAL_LENGTH) {
bundle_intrinsics |= libmv::BUNDLE_FOCAL_LENGTH;
}
if (refine_intrinsics & LIBMV_REFINE_PRINCIPAL_POINT) {
bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K1) {
bundle_intrinsics |= libmv::BUNDLE_RADIAL_K1;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K2) {
bundle_intrinsics |= libmv::BUNDLE_RADIAL_K2;
}
progress_update_callback(callback_customdata, 1.0, "Refining solution");
EuclideanBundleCommonIntrinsics(tracks,
bundle_intrinsics,
bundle_constraints,
reconstruction,
intrinsics);
}
void finishReconstruction(
const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
libmv_Reconstruction *libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
/* Reprojection error calculation. */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
libmv_reconstruction->error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
}
bool selectTwoKeyframesBasedOnGRICAndVariance(
Tracks& tracks,
Tracks& normalized_tracks,
CameraIntrinsics& camera_intrinsics,
int& keyframe1,
int& keyframe2) {
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
camera_intrinsics,
keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
return false;
} else if (keyframes.size() == 2) {
keyframe1 = keyframes[0];
keyframe2 = keyframes[1];
return true;
}
  /* Now choose the two keyframes which give the least reprojection error
   * after solving an initial reconstruction from each candidate pair.
   *
   * In fact, libmv currently returns a single pair only, so this code will
* not actually run. But in the future this could change, so let's stay
* prepared.
*/
int previous_keyframe = keyframes[0];
double best_error = std::numeric_limits<double>::max();
for (int i = 1; i < keyframes.size(); i++) {
EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
Tracks keyframe_tracks(keyframe_markers);
    /* Get a solution from the two keyframes only. */
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(keyframe_tracks, &reconstruction);
EuclideanCompleteReconstruction(keyframe_tracks,
&reconstruction,
NULL);
double current_error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
best_error = current_error;
keyframe1 = previous_keyframe;
keyframe2 = current_keyframe;
}
previous_keyframe = current_keyframe;
}
return true;
}
Marker libmv_projectMarker(const EuclideanPoint& point,
const EuclideanCamera& camera,
const CameraIntrinsics& intrinsics) {
libmv::Vec3 projected = camera.R * point.X + camera.t;
projected /= projected(2);
libmv::Marker reprojected_marker;
intrinsics.ApplyIntrinsics(projected(0), projected(1),
&reprojected_marker.x,
&reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
return reprojected_marker;
}
void libmv_getNormalizedTracks(const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
Tracks *normalized_tracks) {
libmv::vector<Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
Marker &marker = markers[i];
camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
&marker.x, &marker.y);
normalized_tracks->Insert(marker.image,
marker.track,
marker.x, marker.y,
marker.weight);
}
}
} // namespace
libmv_Reconstruction *libmv_solveReconstruction(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics *camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
  /* Invert the camera intrinsics. */
Tracks normalized_tracks;
libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
  /* Keyframe selection. */
int keyframe1 = libmv_reconstruction_options->keyframe1,
keyframe2 = libmv_reconstruction_options->keyframe2;
if (libmv_reconstruction_options->select_keyframes) {
LG << "Using automatic keyframe selection";
update_callback.invoke(0, "Selecting keyframes");
selectTwoKeyframesBasedOnGRICAndVariance(tracks,
normalized_tracks,
*camera_intrinsics,
keyframe1,
keyframe2);
    /* So that the keyframes shown in the interface are updated. */
libmv_reconstruction_options->keyframe1 = keyframe1;
libmv_reconstruction_options->keyframe2 = keyframe2;
}
/* Actual reconstruction. */
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
if (keyframe_markers.size() < 8) {
LG << "No enough markers to initialize from";
libmv_reconstruction->is_valid = false;
return libmv_reconstruction;
}
update_callback.invoke(0, "Initial reconstruction");
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(normalized_tracks, &reconstruction);
EuclideanCompleteReconstruction(normalized_tracks,
&reconstruction,
&update_callback);
  /* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
callback_customdata,
&reconstruction,
camera_intrinsics);
}
/* Set reconstruction scale to unity. */
EuclideanScaleToUnity(&reconstruction);
/* Finish reconstruction. */
finishReconstruction(tracks,
*camera_intrinsics,
libmv_reconstruction,
progress_update_callback,
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction *) libmv_reconstruction;
}
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks *libmv_tracks,
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata) {
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics *camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(
libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
/* Actual reconstruction. */
ModalSolver(normalized_tracks, &reconstruction, &update_callback);
PolynomialCameraIntrinsics empty_intrinsics;
EuclideanBundleCommonIntrinsics(normalized_tracks,
libmv::BUNDLE_NO_INTRINSICS,
libmv::BUNDLE_NO_TRANSLATION,
&reconstruction,
&empty_intrinsics);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(
tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
progress_update_callback, callback_customdata,
&reconstruction,
camera_intrinsics);
}
/* Finish reconstruction. */
finishReconstruction(tracks,
*camera_intrinsics,
libmv_reconstruction,
progress_update_callback,
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction *) libmv_reconstruction;
}
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction) {
return libmv_reconstruction->is_valid;
}
void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction) {
LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction *libmv_reconstruction,
int track,
double pos[3]) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint *point =
reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
pos[2] = point->X[1];
return 1;
}
return 0;
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction *libmv_reconstruction,
int track) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
const EuclideanCamera *camera =
reconstruction->CameraForImage(markers[i].image);
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
}
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
total_error += sqrt(ex * ex + ey * ey);
}
return total_error / num_reprojected;
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction *libmv_reconstruction,
int image) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersInImage(image);
const EuclideanCamera *camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
if (!camera) {
return 0.0;
}
for (int i = 0; i < markers.size(); ++i) {
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
}
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
total_error += sqrt(ex * ex + ey * ey);
}
return total_error / num_reprojected;
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction *libmv_reconstruction,
int image,
double mat[4][4]) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera *camera =
reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
int l = k;
if (k == 1) {
l = 2;
} else if (k == 2) {
l = 1;
}
if (j == 2) {
mat[j][l] = -camera->R(j, k);
} else {
mat[j][l] = camera->R(j, k);
}
}
mat[j][3] = 0.0;
}
libmv::Vec3 optical_center = -camera->R.transpose() * camera->t;
mat[3][0] = optical_center(0);
mat[3][1] = optical_center(2);
mat[3][2] = optical_center(1);
mat[3][3] = 1.0;
return 1;
}
return 0;
}
double libmv_reprojectionError(
const libmv_Reconstruction *libmv_reconstruction) {
return libmv_reconstruction->error;
}
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction *libmv_reconstruction) {
return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
}

View File

@@ -1,102 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_RECONSTRUCTION_H_
#define LIBMV_C_API_RECONSTRUCTION_H_
#ifdef __cplusplus
extern "C" {
#endif
struct libmv_Tracks;
struct libmv_CameraIntrinsics;
struct libmv_CameraIntrinsicsOptions;
typedef struct libmv_Reconstruction libmv_Reconstruction;
enum {
LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 4),
};
typedef struct libmv_ReconstructionOptions {
int select_keyframes;
int keyframe1, keyframe2;
int refine_intrinsics;
} libmv_ReconstructionOptions;
typedef void (*reconstruct_progress_update_cb) (void* customdata,
double progress,
const char* message);
libmv_Reconstruction* libmv_solveReconstruction(
const struct libmv_Tracks* libmv_tracks,
const struct libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata);
libmv_Reconstruction* libmv_solveModal(
const struct libmv_Tracks* libmv_tracks,
const struct libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata);
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]);
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* libmv_reconstruction,
int track);
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image);
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]);
double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction *libmv_Reconstruction);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_RECONSTRUCTION_H_
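
A minimal sketch of driving the solver through this C-API. The tracks and camera intrinsics options are assumed to be filled in elsewhere, and the keyframe numbers and refine flags are illustrative; only the function and flag names come from the header above.

#include <stdio.h>
#include "intern/reconstruction.h"

/* Hypothetical progress callback printing solver messages to stdout. */
static void example_progress(void *customdata, double progress,
                             const char *message) {
  (void)customdata;
  printf("%3d%% %s\n", (int)(progress * 100.0), message);
}

static void example_solve(
    struct libmv_Tracks *tracks,
    const struct libmv_CameraIntrinsicsOptions *intrinsics_options) {
  libmv_ReconstructionOptions options;
  options.select_keyframes = 1;  /* keyframe1/keyframe2 are updated by the solve. */
  options.keyframe1 = 1;
  options.keyframe2 = 30;
  options.refine_intrinsics = LIBMV_REFINE_FOCAL_LENGTH |
                              LIBMV_REFINE_RADIAL_DISTORTION_K1;

  libmv_Reconstruction *reconstruction = libmv_solveReconstruction(
      tracks, intrinsics_options, &options, example_progress, NULL);
  if (libmv_reconstructionIsValid(reconstruction)) {
    printf("average reprojection error: %f\n",
           libmv_reprojectionError(reconstruction));
  }
  libmv_reconstructionDestroy(reconstruction);
}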

View File

@@ -1,43 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2014 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_REGION_H_
#define LIBMV_C_API_REGION_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_Region {
float min[2];
float max[2];
} libmv_Region;
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_REGION_H_

View File

@@ -1,403 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2013 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "libmv-capi.h"
#include <cstdlib>
#include <cstring>
/* ************ Logging ************ */
void libmv_initLogging(const char * /*argv0*/) {
}
void libmv_startDebugLogging(void) {
}
void libmv_setLoggingVerbosity(int /*verbosity*/) {
}
/* ************ Planar tracker ************ */
/* TrackRegion (new planar tracker) */
int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
const float * /*image1*/,
int /*image1_width*/,
int /*image1_height*/,
const float * /*image2*/,
int /*image2_width*/,
int /*image2_height*/,
const double *x1,
const double *y1,
libmv_TrackRegionResult *result,
double *x2,
double *y2) {
/* Convert to doubles for the libmv api. The four corners and the center. */
for (int i = 0; i < 5; ++i) {
x2[i] = x1[i];
y2[i] = y1[i];
}
result->termination = -1;
result->termination_reason = "Built without libmv support";
result->correlation = 0.0;
return false;
}
void libmv_samplePlanarPatchFloat(const float * /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double * /*xs*/,
const double * /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float * /*mask*/,
float * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double * /*xs*/,
const double * /*ys*/,
int /*num_samples_x*/, int /*num_samples_y*/,
const float * /*mask*/,
unsigned char * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
{
}
/* ************ Tracks ************ */
libmv_Tracks *libmv_tracksNew(void) {
return NULL;
}
void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
int /*image*/,
int /*track*/,
double /*x*/,
double /*y*/,
double /*weight*/) {
}
void libmv_tracksDestroy(libmv_Tracks * /*libmv_tracks*/) {
}
/* ************ Reconstruction solver ************ */
libmv_Reconstruction *libmv_solveReconstruction(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void * /*callback_customdata*/) {
return NULL;
}
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
const libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void * /*callback_customdata*/) {
return NULL;
}
int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
return 0;
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/,
double /*pos*/[3]) {
return 0;
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/) {
return 0.0;
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/) {
return 0.0;
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/,
double /*mat*/[4][4]) {
return 0;
}
double libmv_reprojectionError(
const libmv_Reconstruction * /*libmv_reconstruction*/) {
return 0.0;
}
void libmv_reconstructionDestroy(
struct libmv_Reconstruction * /*libmv_reconstruction*/) {
}
/* ************ Feature detector ************ */
libmv_Features *libmv_detectFeaturesByte(const unsigned char * /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions * /*options*/) {
return NULL;
}
struct libmv_Features *libmv_detectFeaturesFloat(
const float * /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions * /*options*/) {
return NULL;
}
int libmv_countFeatures(const libmv_Features * /*libmv_features*/) {
return 0;
}
void libmv_getFeature(const libmv_Features * /*libmv_features*/,
int /*number*/,
double *x,
double *y,
double *score,
double *size) {
*x = 0.0;
*y = 0.0;
*score = 0.0;
*size = 0.0;
}
void libmv_featuresDestroy(struct libmv_Features * /*libmv_features*/) {
}
/* ************ Camera intrinsics ************ */
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction * /*libmv_reconstruction*/) {
return NULL;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/) {
return NULL;
}
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
return NULL;
}
void libmv_cameraIntrinsicsDestroy(
libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
}
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_CameraIntrinsics * /*libmv_intrinsics*/) {
}
void libmv_cameraIntrinsicsSetThreads(
libmv_CameraIntrinsics * /*libmv_intrinsics*/,
int /*threads*/) {
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
libmv_CameraIntrinsicsOptions *camera_intrinsics_options) {
memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
camera_intrinsics_options->focal_length = 1.0;
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
const unsigned char *source_image,
int width, int height,
float overscan, int channels,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(unsigned char));
}
void libmv_cameraIntrinsicsUndistortFloat(
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(float));
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(unsigned char));
}
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
float* source_image,
int width,
int height,
float overscan,
int channels,
float* destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(float));
}
/* ************ utils ************ */
void libmv_cameraIntrinsicsApply(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1) {
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
*x1 = x * focal_length + principal_x;
*y1 = y * focal_length + principal_y;
}
void libmv_cameraIntrinsicsInvert(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
double x,
double y,
double* x1,
double* y1) {
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
*x1 = (x - principal_x) / focal_length;
*y1 = (y - principal_y) / focal_length;
}
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
/* const */ double (*x2)[2],
int num_points,
double H[3][3]) {
memset(H, 0, sizeof(double[3][3]));
H[0][0] = 1.0f;
H[1][1] = 1.0f;
H[2][2] = 1.0f;
}
/* ************ autotrack ************ */
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
{
return NULL;
}
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
{
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_AutoTrackOptions* /*options*/)
{
}
int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_TrackRegionOptions* /*libmv_options*/,
libmv_Marker * /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/)
{
return 0;
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker*/)
{
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
int /*clip*/,
int /*frame*/,
int /*track*/,
libmv_Marker* /*libmv_marker*/)
{
return 0;
}
/* ************ frame accessor ************ */
libmv_FrameAccessor* libmv_FrameAccessorNew(
    libmv_FrameAccessorUserData* /*user_data*/,
libmv_GetImageCallback /*get_image_callback*/,
libmv_ReleaseImageCallback /*release_image_callback*/)
{
return NULL;
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
{
}
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform * /*transform*/)
{
return 0;
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/)
{
}

View File

@@ -1,177 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/track_region.h"
#include "intern/image.h"
#include "intern/utildefines.h"
#include "libmv/image/image.h"
#include "libmv/tracking/track_region.h"
/* Define this to generate PNG images with the content of the search areas
   between which tracking failed. */
#undef DUMP_FAILURE
/* Define this to generate PNG images with the content of the search areas
   on every iteration of tracking. */
#undef DUMP_ALWAYS
using libmv::FloatImage;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using libmv::TrackRegion;
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
TrackRegionOptions* track_region_options) {
switch (options.motion_model) {
#define LIBMV_CONVERT(the_model) \
case TrackRegionOptions::the_model: \
track_region_options->mode = TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
LIBMV_CONVERT(TRANSLATION_SCALE)
LIBMV_CONVERT(TRANSLATION_ROTATION_SCALE)
LIBMV_CONVERT(AFFINE)
LIBMV_CONVERT(HOMOGRAPHY)
#undef LIBMV_CONVERT
}
track_region_options->minimum_correlation = options.minimum_correlation;
track_region_options->max_iterations = options.num_iterations;
track_region_options->sigma = options.sigma;
track_region_options->num_extra_points = 1;
track_region_options->image1_mask = NULL;
track_region_options->use_brute_initialization = options.use_brute;
/* TODO(keir): This will make some cases better, but may be a regression until
* the motion model is in. Since this is on trunk, enable it for now.
*
* TODO(sergey): This gives much worse results on mango footage (see 04_2e)
 * so disabling it for now until a proper prediction model is landed.
*
 * The thing is, currently Blender sends the input coordinates as the guess to
 * the region tracker, and in case of fast motion such an early out ruins the track.
*/
track_region_options->attempt_refine_before_brute = false;
track_region_options->use_normalized_intensities = options.use_normalization;
}
void libmv_regionTrackergetResult(const TrackRegionResult& track_region_result,
libmv_TrackRegionResult* result) {
result->termination = (int) track_region_result.termination;
result->termination_reason = "";
result->correlation = track_region_result.correlation;
}
int libmv_trackRegion(const libmv_TrackRegionOptions* options,
const float* image1,
int image1_width,
int image1_height,
const float* image2,
int image2_width,
int image2_height,
const double* x1,
const double* y1,
libmv_TrackRegionResult* result,
double* x2,
double* y2) {
double xx1[5], yy1[5];
double xx2[5], yy2[5];
bool tracking_result = false;
// Copy into working arrays for the libmv API. The four corners and the center.
for (int i = 0; i < 5; ++i) {
xx1[i] = x1[i];
yy1[i] = y1[i];
xx2[i] = x2[i];
yy2[i] = y2[i];
}
TrackRegionOptions track_region_options;
FloatImage image1_mask;
libmv_configureTrackRegionOptions(*options, &track_region_options);
if (options->image1_mask) {
libmv_floatBufferToFloatImage(options->image1_mask,
image1_width,
image1_height,
1,
&image1_mask);
track_region_options.image1_mask = &image1_mask;
}
// Convert from raw float buffers to libmv's FloatImage.
FloatImage old_patch, new_patch;
libmv_floatBufferToFloatImage(image1,
image1_width,
image1_height,
1,
&old_patch);
libmv_floatBufferToFloatImage(image2,
image2_width,
image2_height,
1,
&new_patch);
TrackRegionResult track_region_result;
TrackRegion(old_patch, new_patch,
xx1, yy1,
track_region_options,
xx2, yy2,
&track_region_result);
// Copy the tracked coordinates back for the Blender API.
for (int i = 0; i < 5; ++i) {
x2[i] = xx2[i];
y2[i] = yy2[i];
}
// TODO(keir): Update the termination string with failure details.
if (track_region_result.termination == TrackRegionResult::CONVERGENCE ||
track_region_result.termination == TrackRegionResult::NO_CONVERGENCE) {
tracking_result = true;
}
// Debug dump of patches.
#if defined(DUMP_FAILURE) || defined(DUMP_ALWAYS)
bool need_dump = !tracking_result;
# ifdef DUMP_ALWAYS
need_dump = true;
# endif
if (need_dump) {
libmv_saveImage(old_patch, "old_patch", x1[4], y1[4]);
libmv_saveImage(new_patch, "new_patch", x2[4], y2[4]);
if (options->image1_mask) {
libmv_saveImage(image1_mask, "mask", x2[4], y2[4]);
}
}
#endif
return tracking_result;
}
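/* Editor's note (illustration, not part of the original file): callers pass
 * five points per image in x1/y1 and x2/y2; indices 0..3 are the four patch
 * corners and index 4 is the marker center, in pixel coordinates of image1
 * and image2 respectively. x2/y2 serve both as the initial guess and as the
 * output. The return value is 1 when the termination status is CONVERGENCE
 * or NO_CONVERGENCE (treated as a usable result above) and 0 otherwise. */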

View File

@@ -1,81 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_TRACK_REGION_H_
#define LIBMV_C_API_TRACK_REGION_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_TrackRegionOptions {
int motion_model;
int num_iterations;
int use_brute;
int use_normalization;
double minimum_correlation;
double sigma;
float *image1_mask;
} libmv_TrackRegionOptions;
typedef struct libmv_TrackRegionResult {
int termination;
const char* termination_reason;
double correlation;
} libmv_TrackRegionResult;
#ifdef __cplusplus
namespace libmv {
struct TrackRegionOptions;
struct TrackRegionResult;
}
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
libmv::TrackRegionOptions* track_region_options);
void libmv_regionTrackergetResult(
const libmv::TrackRegionResult& track_region_result,
libmv_TrackRegionResult* result);
#endif
int libmv_trackRegion(const libmv_TrackRegionOptions* options,
const float* image1,
int image1_width,
int image1_height,
const float* image2,
int image2_width,
int image2_height,
const double* x1,
const double* y1,
libmv_TrackRegionResult* result,
double* x2,
double* y2);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_TRACK_REGION_H_

View File

@@ -1,52 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/tracks.h"
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/tracks.h"
using libmv::Marker;
using libmv::Tracks;
libmv_Tracks* libmv_tracksNew(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_Tracks*) tracks;
}
void libmv_tracksDestroy(libmv_Tracks* libmv_tracks) {
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
int image,
int track,
double x,
double y,
double weight) {
((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
}
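/* Editor's note: a minimal lifecycle sketch; the frame/track numbers and
 * coordinates are made up for illustration and this helper is not part of
 * the original file. */
static void libmv_exampleTracksUsage(void) {
  libmv_Tracks* libmv_tracks = libmv_tracksNew();
  /* One marker of track 0 on image (frame) 1 at pixel (12.5, 34.0),
   * contributing with full weight. */
  libmv_tracksInsert(libmv_tracks, 1, 0, 12.5, 34.0, 1.0);
  libmv_tracksDestroy(libmv_tracks);
}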

View File

@@ -1,51 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_TRACKS_H_
#define LIBMV_C_API_TRACKS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_Tracks libmv_Tracks;
libmv_Tracks* libmv_tracksNew(void);
void libmv_tracksDestroy(libmv_Tracks* libmv_tracks);
void libmv_tracksInsert(libmv_Tracks* libmv_tracks,
int image,
int track,
double x,
double y,
double weight);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_TRACKS_H_

View File

@@ -1,138 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#include "intern/tracksN.h"
#include "intern/utildefines.h"
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
using mv::Marker;
using mv::Tracks;
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
Marker *marker) {
marker->clip = libmv_marker.clip;
marker->frame = libmv_marker.frame;
marker->track = libmv_marker.track;
marker->center(0) = libmv_marker.center[0];
marker->center(1) = libmv_marker.center[1];
for (int i = 0; i < 4; i++) {
marker->patch.coordinates(i, 0) = libmv_marker.patch[i][0];
marker->patch.coordinates(i, 1) = libmv_marker.patch[i][1];
}
marker->search_region.min(0) = libmv_marker.search_region_min[0];
marker->search_region.min(1) = libmv_marker.search_region_min[1];
marker->search_region.max(0) = libmv_marker.search_region_max[0];
marker->search_region.max(1) = libmv_marker.search_region_max[1];
marker->weight = libmv_marker.weight;
marker->source = (Marker::Source) libmv_marker.source;
marker->status = (Marker::Status) libmv_marker.status;
marker->reference_clip = libmv_marker.reference_clip;
marker->reference_frame = libmv_marker.reference_frame;
marker->model_type = (Marker::ModelType) libmv_marker.model_type;
marker->model_id = libmv_marker.model_id;
marker->disabled_channels = libmv_marker.disabled_channels;
}
void libmv_markerToApiMarker(const Marker& marker,
libmv_Marker *libmv_marker) {
libmv_marker->clip = marker.clip;
libmv_marker->frame = marker.frame;
libmv_marker->track = marker.track;
libmv_marker->center[0] = marker.center(0);
libmv_marker->center[1] = marker.center(1);
for (int i = 0; i < 4; i++) {
libmv_marker->patch[i][0] = marker.patch.coordinates(i, 0);
libmv_marker->patch[i][1] = marker.patch.coordinates(i, 1);
}
libmv_marker->search_region_min[0] = marker.search_region.min(0);
libmv_marker->search_region_min[1] = marker.search_region.min(1);
libmv_marker->search_region_max[0] = marker.search_region.max(0);
libmv_marker->search_region_max[1] = marker.search_region.max(1);
libmv_marker->weight = marker.weight;
libmv_marker->source = (libmv_MarkerSource) marker.source;
libmv_marker->status = (libmv_MarkerStatus) marker.status;
libmv_marker->reference_clip = marker.reference_clip;
libmv_marker->reference_frame = marker.reference_frame;
libmv_marker->model_type = (libmv_MarkerModelType) marker.model_type;
libmv_marker->model_id = marker.model_id;
libmv_marker->disabled_channels = marker.disabled_channels;
}
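/* Editor's note: the casts in the two converters above assume that the C
 * enums in intern/tracksN.h mirror the C++ enums in mv::Marker exactly (see
 * the note in that header). A compile-time guard along these lines could make
 * the assumption explicit; it is an illustrative addition, not part of the
 * original file. */
#if defined(__cplusplus) && __cplusplus >= 201103L
static_assert((int) LIBMV_MARKER_SOURCE_PREDICTED == (int) Marker::PREDICTED,
              "libmv_MarkerSource must stay in sync with mv::Marker::Source");
static_assert((int) LIBMV_MARKER_STATUS_OUTLIER == (int) Marker::OUTLIER,
              "libmv_MarkerStatus must stay in sync with mv::Marker::Status");
#endif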
libmv_TracksN* libmv_tracksNewN(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_TracksN*) tracks;
}
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks) {
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((Tracks*) libmv_tracks)->AddMarker(marker);
}
void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track,
libmv_Marker* libmv_marker) {
Marker marker;
((Tracks*) libmv_tracks)->GetMarker(clip, frame, track, &marker);
libmv_markerToApiMarker(marker, libmv_marker);
}
void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track) {
((Tracks *) libmv_tracks)->RemoveMarker(clip, frame, track);
}
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track) {
((Tracks *) libmv_tracks)->RemoveMarkersForTrack(track);
}
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->MaxClip();
}
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip) {
return ((Tracks*) libmv_tracks)->MaxFrame(clip);
}
int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->MaxTrack();
}
int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks) {
return ((Tracks*) libmv_tracks)->NumMarkers();
}

View File

@@ -1,129 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
// TODO(sergey): For the time being, while we're converting the simple pipeline
// to an autotrack pipeline, we call it tracksN.
// Once we're done with the porting we'll remove the N suffix.
#ifndef LIBMV_C_API_TRACKSN_H_
#define LIBMV_C_API_TRACKSN_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef struct libmv_TracksN libmv_TracksN;
// Keep the order in these enums exactly the same as in mv::Marker.
// Otherwise the API won't convert the values properly.
typedef enum libmv_MarkerSource {
LIBMV_MARKER_SOURCE_MANUAL,
LIBMV_MARKER_SOURCE_DETECTED,
LIBMV_MARKER_SOURCE_TRACKED,
LIBMV_MARKER_SOURCE_MATCHED,
LIBMV_MARKER_SOURCE_PREDICTED,
} libmv_MarkerSource;
typedef enum libmv_MarkerStatus {
LIBMV_MARKER_STATUS_UNKNOWN,
LIBMV_MARKER_STATUS_INLIER,
LIBMV_MARKER_STATUS_OUTLIER,
} libmv_MarkerStatus;
typedef enum libmv_MarkerModelType {
LIBMV_MARKER_MODEL_TYPE_POINT,
LIBMV_MARKER_MODEL_TYPE_PLANE,
LIBMV_MARKER_MODEL_TYPE_LINE,
LIBMV_MARKER_MODEL_TYPE_CUBE,
} libmv_MarkerModelType;
enum libmv_MarkerChannel {
LIBMV_MARKER_CHANNEL_R = (1 << 0),
LIBMV_MARKER_CHANNEL_G = (1 << 1),
LIBMV_MARKER_CHANNEL_B = (1 << 2),
};
typedef struct libmv_Marker {
int clip;
int frame;
int track;
float center[2];
float patch[4][2];
float search_region_min[2];
float search_region_max[2];
float weight;
libmv_MarkerSource source;
libmv_MarkerStatus status;
int reference_clip;
int reference_frame;
libmv_MarkerModelType model_type;
int model_id;
int disabled_channels;
} libmv_Marker;
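/* Editor's note: an illustrative way to fill this struct from C; the field
 * values are made up and libmv_tracksAddMarkerN() is declared further down
 * in this header. Not part of the original file.
 *
 *   libmv_Marker marker;
 *   memset(&marker, 0, sizeof(marker));
 *   marker.clip = 0;
 *   marker.frame = 1;
 *   marker.track = 3;
 *   marker.center[0] = 320.0f;
 *   marker.center[1] = 240.0f;
 *   marker.weight = 1.0f;
 *   marker.source = LIBMV_MARKER_SOURCE_MANUAL;
 *   marker.status = LIBMV_MARKER_STATUS_UNKNOWN;
 *   libmv_tracksAddMarkerN(tracks, &marker);
 */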
#ifdef __cplusplus
namespace mv {
struct Marker;
}
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
mv::Marker *marker);
void libmv_markerToApiMarker(const mv::Marker& marker,
libmv_Marker *libmv_marker);
#endif
libmv_TracksN* libmv_tracksNewN(void);
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker);
void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track,
libmv_Marker* libmv_marker);
void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track);
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);
int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks);
int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_TRACKSN_H_

View File

@@ -1,62 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2013 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_UTILDEFINES_H_
#define LIBMV_C_API_UTILDEFINES_H_
#if defined(_MSC_VER) && _MSC_VER < 1900
# define __func__ __FUNCTION__
# define snprintf _snprintf
#endif
#ifdef WITH_LIBMV_GUARDED_ALLOC
# include "MEM_guardedalloc.h"
# define LIBMV_OBJECT_NEW OBJECT_GUARDED_NEW
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
# define LIBMV_STRUCT_NEW(type, count) \
(type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_DELETE(what) MEM_freeN(what)
#else
// Need this to keep libmv-capi potentially standalone.
# if defined __GNUC__ || defined __sun
# define LIBMV_OBJECT_NEW(type, args ...) \
new(malloc(sizeof(type))) type(args)
# else
# define LIBMV_OBJECT_NEW(type, ...) \
new(malloc(sizeof(type))) type(__VA_ARGS__)
# endif
# define LIBMV_OBJECT_DELETE(what, type) \
{ \
if (what) { \
((type*)(what))->~type(); \
free(what); \
} \
} (void)0
# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
#endif
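// Editor's note: typical usage, as seen in intern/tracks.cc:
//   Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
//   LIBMV_OBJECT_DELETE(tracks, Tracks);
// The same call sites compile whether or not WITH_LIBMV_GUARDED_ALLOC is
// defined, which is the point of routing allocation through these macros.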
#endif // LIBMV_C_API_UTILDEFINES_H_

View File

@@ -1,42 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef LIBMV_C_API_H
#define LIBMV_C_API_H
#include "intern/autotrack.h"
#include "intern/camera_intrinsics.h"
#include "intern/detector.h"
#include "intern/frame_accessor.h"
#include "intern/homography.h"
#include "intern/image.h"
#include "intern/logging.h"
#include "intern/reconstruction.h"
#include "intern/track_region.h"
#include "intern/tracks.h"
#include "intern/tracksN.h"
#endif // LIBMV_C_API_H

View File

@@ -1,291 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/autotrack.h"
#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
namespace mv {
namespace {
class DisableChannelsTransform : public FrameAccessor::Transform {
public:
DisableChannelsTransform(int disabled_channels)
: disabled_channels_(disabled_channels) { }
int64_t key() const {
return disabled_channels_;
}
void run(const FloatImage& input, FloatImage* output) const {
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
LG << "Disabling channels: "
<< (disable_red ? "R " : "")
<< (disable_green ? "G " : "")
<< (disable_blue ? "B" : "");
// It's important to rescale the result appropriately so that e.g. if only
// blue is selected, it's not zeroed out.
float scale = (disable_red ? 0.0f : 0.2126f) +
(disable_green ? 0.0f : 0.7152f) +
(disable_blue ? 0.0f : 0.0722f);
output->Resize(input.Height(), input.Width(), 1);
for (int y = 0; y < input.Height(); y++) {
for (int x = 0; x < input.Width(); x++) {
float r = disable_red ? 0.0f : input(y, x, 0);
float g = disable_green ? 0.0f : input(y, x, 1);
float b = disable_blue ? 0.0f : input(y, x, 2);
(*output)(y, x, 0) = (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
}
}
}
private:
// Bitfield representing visible channels, bits are from Marker::Channel.
int disabled_channels_;
};
template<typename QuadT, typename ArrayT>
void QuadToArrays(const QuadT& quad, ArrayT* x, ArrayT* y) {
for (int i = 0; i < 4; ++i) {
x[i] = quad.coordinates(i, 0);
y[i] = quad.coordinates(i, 1);
}
}
void MarkerToArrays(const Marker& marker, double* x, double* y) {
Quad2Df offset_quad = marker.patch;
Vec2f origin = marker.search_region.Rounded().min;
offset_quad.coordinates.rowwise() -= origin.transpose();
QuadToArrays(offset_quad, x, y);
x[4] = marker.center.x() - origin(0);
y[4] = marker.center.y() - origin(1);
}
FrameAccessor::Key GetImageForMarker(const Marker& marker,
FrameAccessor* frame_accessor,
FloatImage* image) {
// TODO(sergey): Currently we pass float region to the accessor,
// but we don't want the accessor to decide the rounding, so we
// do rounding here.
// Ideally we would need to pass IntRegion to the frame accessor.
Region region = marker.search_region.Rounded();
libmv::scoped_ptr<FrameAccessor::Transform> transform = NULL;
if (marker.disabled_channels != 0) {
transform.reset(new DisableChannelsTransform(marker.disabled_channels));
}
return frame_accessor->GetImage(marker.clip,
marker.frame,
FrameAccessor::MONO,
0, // No downscale for now.
&region,
transform.get(),
image);
}
} // namespace
bool AutoTrack::TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
const TrackRegionOptions* track_options) {
// Try to predict the location of the second marker.
bool predicted_position = false;
if (PredictMarkerPosition(tracks_, tracked_marker)) {
LG << "Succesfully predicted!";
predicted_position = true;
} else {
LG << "Prediction failed; trying to track anyway.";
}
Marker reference_marker;
tracks_.GetMarker(tracked_marker->reference_clip,
tracked_marker->reference_frame,
tracked_marker->track,
&reference_marker);
// Convert markers into the format expected by TrackRegion.
double x1[5], y1[5];
MarkerToArrays(reference_marker, x1, y1);
double x2[5], y2[5];
MarkerToArrays(*tracked_marker, x2, y2);
// TODO(keir): Technically this could take a smaller slice from the source
// image instead of taking one the size of the search window.
FloatImage reference_image;
FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
frame_accessor_,
&reference_image);
if (!reference_key) {
LG << "Couldn't get frame for reference marker: " << reference_marker;
return false;
}
FloatImage tracked_image;
FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
frame_accessor_,
&tracked_image);
if (!tracked_key) {
frame_accessor_->ReleaseImage(reference_key);
LG << "Couldn't get frame for tracked marker: " << tracked_marker;
return false;
}
// Store the original position before tracking, so we can calculate the offset later.
Vec2f original_center = tracked_marker->center;
// Do the tracking!
TrackRegionOptions local_track_region_options;
if (track_options) {
local_track_region_options = *track_options;
}
local_track_region_options.num_extra_points = 1; // For center point.
local_track_region_options.attempt_refine_before_brute = predicted_position;
TrackRegion(reference_image,
tracked_image,
x1, y1,
local_track_region_options,
x2, y2,
result);
// Copy results over the tracked marker.
Vec2f tracked_origin = tracked_marker->search_region.Rounded().min;
for (int i = 0; i < 4; ++i) {
tracked_marker->patch.coordinates(i, 0) = x2[i] + tracked_origin[0];
tracked_marker->patch.coordinates(i, 1) = y2[i] + tracked_origin[1];
}
tracked_marker->center(0) = x2[4] + tracked_origin[0];
tracked_marker->center(1) = y2[4] + tracked_origin[1];
Vec2f delta = tracked_marker->center - original_center;
tracked_marker->search_region.Offset(delta);
tracked_marker->source = Marker::TRACKED;
tracked_marker->status = Marker::UNKNOWN;
tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_frame = reference_marker.frame;
// Release the images from the accessor cache.
frame_accessor_->ReleaseImage(reference_key);
frame_accessor_->ReleaseImage(tracked_key);
// TODO(keir): Possibly the return here should get removed since the results
// are part of TrackResult. However, eventually the autotrack stuff will have
// extra status (e.g. prediction fail, etc) that should get included.
return true;
}
void AutoTrack::AddMarker(const Marker& marker) {
tracks_.AddMarker(marker);
}
void AutoTrack::SetMarkers(vector<Marker>* markers) {
tracks_.SetMarkers(markers);
}
bool AutoTrack::GetMarker(int clip, int frame, int track,
Marker* markers) const {
return tracks_.GetMarker(clip, frame, track, markers);
}
void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
int num_clips = frame_accessor_->NumClips();
for (int clip = 0; clip < num_clips; ++clip) {
int num_frames = frame_accessor_->NumFrames(clip);
vector<Marker> previous_frame_markers;
// Q: How to decide track #s when detecting?
// Q: How to match markers from previous frame? set of prev frame tracks?
// Q: How to decide what markers should get tracked and which ones should not?
for (int frame = 0; frame < num_frames; ++frame) {
if (Cancelled()) {
LG << "Got cancel message while detecting and tracking...";
return;
}
// First, get or detect markers for this frame.
vector<Marker> this_frame_markers;
tracks_.GetMarkersInFrame(clip, frame, &this_frame_markers);
LG << "Clip " << clip << ", frame " << frame << " have "
<< this_frame_markers.size();
if (this_frame_markers.size() < options.min_num_features) {
DetectFeaturesInFrame(clip, frame);
this_frame_markers.clear();
tracks_.GetMarkersInFrame(clip, frame, &this_frame_markers);
LG << "... detected " << this_frame_markers.size() << " features.";
}
if (previous_frame_markers.empty()) {
LG << "First frame; skipping tracking stage.";
previous_frame_markers.swap(this_frame_markers);
continue;
}
// Second, find tracks that should get tracked forward into this frame.
// To avoid tracking markers that are already tracked to this frame, make
// a sorted set of the tracks that exist in the last frame.
vector<int> tracks_in_this_frame;
for (int i = 0; i < this_frame_markers.size(); ++i) {
tracks_in_this_frame.push_back(this_frame_markers[i].track);
}
std::sort(tracks_in_this_frame.begin(),
tracks_in_this_frame.end());
// Find tracks in the previous frame that are not in this one.
vector<Marker*> previous_frame_markers_to_track;
int num_skipped = 0;
for (int i = 0; i < previous_frame_markers.size(); ++i) {
if (std::binary_search(tracks_in_this_frame.begin(),
tracks_in_this_frame.end(),
previous_frame_markers[i].track)) {
num_skipped++;
} else {
previous_frame_markers_to_track.push_back(&previous_frame_markers[i]);
}
}
// Finally track the markers from the last frame into this one.
// TODO(keir): Use OMP.
for (int i = 0; i < previous_frame_markers_to_track.size(); ++i) {
Marker this_frame_marker = *previous_frame_markers_to_track[i];
this_frame_marker.frame = frame;
LG << "Tracking: " << this_frame_marker;
TrackRegionResult result;
TrackMarker(&this_frame_marker, &result);
if (result.is_usable()) {
LG << "Success: " << this_frame_marker;
AddMarker(this_frame_marker);
this_frame_markers.push_back(this_frame_marker);
} else {
LG << "Failed to track: " << this_frame_marker;
}
}
// Keep this frame's markers around as the previous-frame markers for the next iteration.
previous_frame_markers.swap(this_frame_markers);
}
}
}
} // namespace mv

View File

@@ -1,226 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
#define LIBMV_AUTOTRACK_AUTOTRACK_H_
#include "libmv/autotrack/tracks.h"
#include "libmv/autotrack/region.h"
#include "libmv/tracking/track_region.h"
namespace libmv {
class CameraIntrinsics;
};
namespace mv {
using libmv::CameraIntrinsics;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
struct FrameAccessor;
class OperationListener;
// The coordinator of all tracking operations; keeps track of all state
// relating to tracking and reconstruction; for example, 2D tracks and motion
// models, reconstructed cameras, points, and planes; tracking settings; etc.
//
// Typical usage for full autotrack:
//
// AutoTrack auto_track(image_accessor);
// auto_track.SetNumFramesInClip(0, 10);
// auto_track.SetNumFramesInClip(1, 54);
// auto_track.AutoTrack()
//
// It is also possible to specify options to control the reconstruction.
// Furthermore, the individual methods of reconstruction are exposed to make it
// possible to interact with the pipeline as it runs. For example, to track one
// marker across frames,
//
// AutoTrack auto_track(image_accessor);
// auto_track.SetNumFramesInClip(0, 10);
// auto_track.SetNumFramesInClip(1, 54);
// auto_track.AddMarker(...);
// auto_track.TrackMarkerToFrame(int clip1, int frame1,
// int clip2, int frame2,
// options?)
//
class AutoTrack {
public:
struct Options {
// Default configuration for 2D tracking when calling TrackMarkerToFrame().
TrackRegionOptions track_region;
// Default search window for region tracking, in absolute frame pixels.
Region search_region;
};
AutoTrack(FrameAccessor* frame_accessor)
: frame_accessor_(frame_accessor) {}
// Marker manipulation.
// Clip manipulation.
// Set the number of clips. These clips will get accessed from the frame
// accessor, matches between frames found, and a reconstruction created.
//void SetNumFrames(int clip, int num_frames);
// Tracking & Matching
// Find the marker for the track in the frame indicated by the marker.
// Caller maintains ownership of *result and *tracked_marker.
bool TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
const TrackRegionOptions* track_options=NULL);
// Wrapper around Tracks API; however these may add additional processing.
void AddMarker(const Marker& tracked_marker);
void SetMarkers(vector<Marker>* markers);
bool GetMarker(int clip, int frame, int track, Marker* marker) const;
// TODO(keir): Implement frame matching! This could be very cool for loop
// closing and connecting across clips.
//void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
// Wrapper around the Reconstruction API.
// Returns the new ID.
int AddCameraIntrinsics(CameraIntrinsics* intrinsics) {
(void) intrinsics;
return 0;
} // XXX
int SetClipIntrinsics(int clip, int intrinsics) {
(void) clip;
(void) intrinsics;
return 0;
} // XXX
enum Motion {
GENERAL_CAMERA_MOTION,
TRIPOD_CAMERA_MOTION,
};
int SetClipMotion(int clip, Motion motion) {
(void) clip;
(void) motion;
return 0;
} // XXX
// Decide what to refine for the given intrinsics. bundle_options is from
// bundle.h (e.g. BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1).
void SetIntrinsicsRefine(int intrinsics, int bundle_options) {
(void) intrinsics;
(void) bundle_options;
} // XXX
// Keyframe read/write.
struct ClipFrame {
int clip;
int frame;
};
const vector<ClipFrame>& keyframes() { return keyframes_; }
void ClearKeyframes() { keyframes_.clear(); }
void SetKeyframes(const vector<ClipFrame>& keyframes) {
keyframes_ = keyframes;
}
// What about reporting what happened? -- callbacks; maybe result struct.
void Reconstruct();
// Detect and track in 2D.
struct DetectAndTrackOptions {
int min_num_features;
};
void DetectAndTrack(const DetectAndTrackOptions& options);
struct DetectFeaturesInFrameOptions {
};
void DetectFeaturesInFrame(int clip, int frame,
const DetectFeaturesInFrameOptions* options=NULL) {
(void) clip;
(void) frame;
(void) options;
} // XXX
// Does not take ownership of the given listener, but keeps a reference to it.
void AddListener(OperationListener* listener) {(void) listener;} // XXX
// Create the initial reconstruction,
//void FindInitialReconstruction();
// State machine
//
// Question: Have explicit state? Or determine state from existing data?
// Conclusion: Determine state from existing data.
//
// Preliminary state thoughts
//
// No tracks or markers
// - Tracks empty.
//
// Initial tracks found
// - All images have at least 5 tracks
//
// Ran RANSAC on tracks to mark inliers / outliers.
// - All images have at least 8 "inlier" tracks
//
// Detector matching run to close loops and match across clips
// - At least 5 matching tracks between clips
//
// Initial reconstruction found (2 frames)?
// - There exists two cameras with intrinsics / extrinsics
//
// Preliminary reconstruction finished
// - Poses for all frames in all clips estimated.
//
// Final reconstruction finished
// - Final reconstruction bundle adjusted.
// For now, expose options directly. In the future this may change.
Options options;
private:
bool Log();
bool Progress();
bool Cancelled() { return false; }
Tracks tracks_; // May be normalized camera coordinates or raw pixels.
//Reconstruction reconstruction_;
// TODO(keir): Add the motion models here.
//vector<MotionModel> motion_models_;
// TODO(keir): Should num_clips and num_frames get moved to FrameAccessor?
// TODO(keir): What about masking for clips and frames to prevent various
// things like reconstruction or tracking from happening on certain frames?
FrameAccessor* frame_accessor_;
//int num_clips_;
//vector<int> num_frames_; // Indexed by clip.
// The intrinsics for each clip, assuming each clip has fixed intrinsics.
// TODO(keir): Decide what the semantics should be for varying focal length.
vector<int> clip_intrinsics_;
vector<ClipFrame> keyframes_;
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_AUTOTRACK_H_

View File

@@ -1,38 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_LISTENER_H_
#define LIBMV_AUTOTRACK_LISTENER_H_
namespace mv {
struct OperationListener {
// All hooks return true to continue or false to indicate the operation
// should abort. Hooks should be thread safe (reentrant).
virtual bool Log(const string& message) = 0;
virtual bool Progress(double fraction) = 0;
virtual bool Cancelled() = 0;
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_LISTENER_H_

View File

@@ -1,86 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
#define LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
#include <stdint.h>
#include "libmv/image/image.h"
namespace mv {
struct Region;
using libmv::FloatImage;
// This is the abstraction to different sources of images that will be part of
// a reconstruction. These may come from disk or they may come from Blender. In
// most cases it's expected that the implementation provides some caching;
// otherwise performance will be terrible. Sometimes the images need to get
// filtered, and this interface provides for that as well (and permits
// implementations to cache filtered image pieces).
struct FrameAccessor {
struct Transform {
virtual ~Transform() { }
// The key should depend on the transform arguments. Must be non-zero.
virtual int64_t key() const = 0;
// Apply the expected transform. Output is sized correctly already.
// TODO(keir): What about blurs that need to access pixels outside the ROI?
virtual void run(const FloatImage& input, FloatImage* output) const = 0;
};
enum InputMode {
MONO,
RGBA
};
typedef void* Key;
// Get a possibly-filtered version of a frame of a video. Downscale will
// cause the input image to get downscaled by 2^downscale for pyramid access.
// Region is always in original-image coordinates, and describes the
// requested area. The transform describes an (optional) transform to apply
// to the image before it is returned.
//
// When done with an image, you must call ReleaseImage with the returned key.
virtual Key GetImage(int clip,
int frame,
InputMode input_mode,
int downscale, // Downscale by 2^downscale.
const Region* region, // Get full image if NULL.
const Transform* transform, // May be NULL.
FloatImage* destination) = 0;
// Releases an image from the frame accessor. Non-caching implementations may
// free the image immediately; others may hold onto the image.
virtual void ReleaseImage(Key) = 0;
virtual bool GetClipDimensions(int clip, int* width, int* height) = 0;
virtual int NumClips() = 0;
virtual int NumFrames(int clip) = 0;
};
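// Editor's note: a deliberately tiny implementation sketch. The class name,
// the raw-array storage and the "single clip, mono frames, no caching, no
// region/transform handling" behaviour are all assumptions made for
// illustration; it is not part of the original header. It only shows the
// minimum a backend has to provide.
class SingleClipFrameAccessor : public FrameAccessor {
 public:
  SingleClipFrameAccessor(const FloatImage* frames, int num_frames)
      : frames_(frames), num_frames_(num_frames) {}
  Key GetImage(int /*clip*/, int frame, InputMode /*input_mode*/,
               int /*downscale*/, const Region* /*region*/,
               const Transform* /*transform*/, FloatImage* destination) {
    if (frame < 0 || frame >= num_frames_) {
      return NULL;  // A NULL key tells callers the frame is unavailable.
    }
    const FloatImage& source = frames_[frame];
    // Plain copy of a mono frame; region, transform and downscale are
    // ignored in this sketch.
    destination->Resize(source.Height(), source.Width(), 1);
    for (int y = 0; y < source.Height(); y++) {
      for (int x = 0; x < source.Width(); x++) {
        (*destination)(y, x, 0) = source(y, x, 0);
      }
    }
    return (Key) &frames_[frame];
  }
  void ReleaseImage(Key) {}  // Frames are owned by the caller; nothing to do.
  bool GetClipDimensions(int /*clip*/, int* width, int* height) {
    if (num_frames_ == 0) {
      return false;
    }
    *width = frames_[0].Width();
    *height = frames_[0].Height();
    return true;
  }
  int NumClips() { return 1; }
  int NumFrames(int /*clip*/) { return num_frames_; }
 private:
  const FloatImage* frames_;
  int num_frames_;
};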
} // namespace mv
#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_

View File

@@ -1,144 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_MARKER_H_
#define LIBMV_AUTOTRACK_MARKER_H_
#include <ostream>
#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/region.h"
#include "libmv/numeric/numeric.h"
namespace mv {
using libmv::Vec2f;
// A marker is the 2D location of a tracked region (quad) in an image.
// Note that some of this information could be normalized by having a
// collection of inter-connected structs. Instead the "fat Marker" design below
// trades memory for data structure simplicity.
struct Marker {
int clip; // The clip this marker is from.
int frame; // The frame within the clip this marker is from.
int track; // The track this marker is from.
// The center of the marker in frame coordinates. This is typically, but not
// always, the same as the center of the patch.
Vec2f center;
// A frame-relative quad defining the part of the image the marker covers.
// For reference markers, the pixels in the patch are the tracking pattern.
Quad2Df patch;
// Some markers are less certain than others; the weight determines the
// amount this marker contributes to the error. 1.0 indicates normal
// contribution; 0.0 indicates a zero-weight track (and will be omitted from
// bundle adjustment).
float weight;
enum Source {
MANUAL, // The user placed this marker manually.
DETECTED, // A keypoint detector found this point.
TRACKED, // The tracking algorithm placed this marker.
MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
PREDICTED, // A motion model predicted this marker. This is needed for
// handling occlusions in some cases where an imaginary marker
// is placed to keep camera motion smooth.
};
Source source;
// Markers may be inliers or outliers if the tracking fails; this allows
// visualizing the markers in the image.
enum Status {
UNKNOWN,
INLIER,
OUTLIER
};
Status status;
// When doing correlation tracking, where to search in the current frame for
// the pattern from the reference frame, in absolute frame coordinates.
Region search_region;
// For tracked and matched markers, indicates what the reference was.
int reference_clip;
int reference_frame;
// Model related information for non-point tracks.
//
// Some tracks are on a larger object, such as a plane or a line or perhaps
// another primitive (a rectangular prism). This captures the information
// needed to say that for example a collection of markers belongs to model #2
// (and model #2 is a plane).
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
ModelType model_type;
// The model ID this track (e.g. the second model, which is a plane).
int model_id;
// TODO(keir): Add a "int model_argument" to capture that e.g. a marker is on
// the 3rd face of a cube.
enum Channel {
CHANNEL_R = (1 << 0),
CHANNEL_G = (1 << 1),
CHANNEL_B = (1 << 2),
};
// Channels from the original frame which this marker is unable to see.
int disabled_channels;
// Offset everything (center, patch, search) by the given delta.
template<typename T>
void Offset(const T& offset) {
center += offset.template cast<float>();
patch.coordinates.rowwise() += offset.template cast<float>();
search_region.Offset(offset);
}
// Shift the center to the given new position (and patch, search).
template<typename T>
void SetPosition(const T& new_center) {
Offset(new_center - center);
}
};
inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
out << "{"
<< marker.clip << ", "
<< marker.frame << ", "
<< marker.track << ", ("
<< marker.center.x() << ", "
<< marker.center.y() << ")"
<< "}";
return out;
}
} // namespace mv
#endif // LIBMV_AUTOTRACK_MARKER_H_

View File

@@ -1,44 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_MODEL_H_
#define LIBMV_AUTOTRACK_MODEL_H_
#include "libmv/numeric/numeric.h"
#include "libmv/autotrack/quad.h"
namespace mv {
struct Model {
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
// ???
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_MODEL_H_

View File

@@ -1,316 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/tracking/kalman_filter.h"
namespace mv {
namespace {
using libmv::vector;
using libmv::Vec2;
// Implied time delta between steps. Set empirically by tweaking and seeing
// what numbers did best at prediction.
const double dt = 3.8;
// State transition matrix.
// The states for predicting a track are as follows:
//
// 0 - X position
// 1 - X velocity
// 2 - X acceleration
// 3 - Y position
// 4 - Y velocity
// 5 - Y acceleration
//
// Note that in the velocity-only state transition matrix, the acceleration
// component is ignored; so technically the system could be modelled with only
// 4 states instead of 6. For ease of implementation, this keeps order 6.
// Choose one or the other model from below (velocity or acceleration).
// For a typical system having constant velocity. This gives smooth-appearing
// predictions, but they are not always as accurate.
const double velocity_state_transition_data[] = {
1, dt, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, 0, 0, 1, dt, 0,
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
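// Editor's note: with this matrix a single filter.Step() advances the state
// by one implied frame as x += dt * vx and y += dt * vy, while the velocity
// and the (unused) acceleration entries stay constant. That is why the
// velocity-only model gives the smooth but not always accurate predictions
// described above.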
// This 3rd-order system also models acceleration. This makes for "jerky"
// predictions, but they tend to be more accurate.
const double acceleration_state_transition_data[] = {
1, dt, dt*dt/2, 0, 0, 0,
0, 1, dt, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, 0, 0, 1, dt, dt*dt/2,
0, 0, 0, 0, 1, dt,
0, 0, 0, 0, 0, 1
};
// This system attempts to add an angular velocity component. However, it's
// total junk.
const double angular_state_transition_data[] = {
1, dt, -dt, 0, 0, 0, // Position x
0, 1, 0, 0, 0, 0, // Velocity x
0, 0, 1, 0, 0, 0, // Angular momentum
0, 0, dt, 1, dt, 0, // Position y
0, 0, 0, 0, 1, 0, // Velocity y
0, 0, 0, 0, 0, 1 // Ignored
};
const double* state_transition_data = velocity_state_transition_data;
// Observation matrix.
const double observation_data[] = {
1., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 0., 0.
};
// Process covariance.
const double process_covariance_data[] = {
35, 0, 0, 0, 0, 0,
0, 5, 0, 0, 0, 0,
0, 0, 5, 0, 0, 0,
0, 0, 0, 35, 0, 0,
0, 0, 0, 0, 5, 0,
0, 0, 0, 0, 0, 5
};
// Measurement covariance.
const double measurement_covariance_data[] = {
0.01, 0.00,
0.00, 0.01,
};
// Initial covariance.
const double initial_covariance_data[] = {
10, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, 0, 0, 10, 0, 0,
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
TrackerKalman filter(state_transition_data,
observation_data,
process_covariance_data,
measurement_covariance_data);
bool OrderByFrameLessThan(const Marker* a, const Marker* b) {
if (a->frame == b->frame) {
if (a->clip == b->clip) {
return a->track < b->track;
}
return a->clip < b->clip;
}
return a->frame < b->frame;
}
// Predicted must be after the previous markers (in the frame numbering sense).
void RunPrediction(const vector<Marker*> previous_markers,
Marker* predicted_marker) {
TrackerKalman::State state;
state.mean << previous_markers[0]->center.x(), 0, 0,
previous_markers[0]->center.y(), 0, 0;
state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
initial_covariance_data);
int current_frame = previous_markers[0]->frame;
int target_frame = predicted_marker->frame;
bool predict_forward = current_frame < target_frame;
int frame_delta = predict_forward ? 1 : -1;
for (int i = 1; i < previous_markers.size(); ++i) {
// Step forward predicting the state until it is on the current marker.
int predictions = 0;
for (;
current_frame != previous_markers[i]->frame;
current_frame += frame_delta) {
filter.Step(&state);
predictions++;
LG << "Predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
}
// Log the error -- not actually used, but interesting.
Vec2 error = previous_markers[i]->center.cast<double>() -
Vec2(state.mean(0), state.mean(3));
LG << "Prediction error for " << predictions << " steps: ("
<< error.x() << ", " << error.y() << "); norm: " << error.norm();
// Now that the state is predicted in the current frame, update the state
// based on the measurement from the current frame.
filter.Update(previous_markers[i]->center.cast<double>(),
Eigen::Matrix<double, 2, 2, Eigen::RowMajor>(
measurement_covariance_data),
&state);
LG << "Updated point: " << state.mean(0) << ", " << state.mean(3);
}
// At this point, all the prediction that's possible has been done. Finally
// predict until the target frame.
for (; current_frame != target_frame; current_frame += frame_delta) {
filter.Step(&state);
LG << "Final predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
}
// The x and y positions are at 0 and 3; ignore acceleration and velocity.
predicted_marker->center.x() = state.mean(0);
predicted_marker->center.y() = state.mean(3);
// Take the patch from the last marker then shift it to match the prediction.
const Marker& last_marker = *previous_markers[previous_markers.size() - 1];
predicted_marker->patch = last_marker.patch;
Vec2f delta = predicted_marker->center - last_marker.center;
for (int i = 0; i < 4; ++i) {
predicted_marker->patch.coordinates.row(i) += delta;
}
// Alter the search area as well so it always corresponds to the center.
predicted_marker->search_region = last_marker.search_region;
predicted_marker->search_region.Offset(delta);
}
} // namespace
bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
// Get all markers for this clip and track.
vector<Marker> markers;
tracks.GetMarkersForTrackInClip(marker->clip, marker->track, &markers);
if (markers.empty()) {
LG << "No markers to predict from for " << *marker;
return false;
}
// Order the markers by frame within the clip.
vector<Marker*> boxed_markers(markers.size());
for (int i = 0; i < markers.size(); ++i) {
boxed_markers[i] = &markers[i];
}
std::sort(boxed_markers.begin(), boxed_markers.end(), OrderByFrameLessThan);
// Find the insertion point for this marker among the returned ones.
int insert_at = -1; // If we find the exact frame
int insert_before = -1; // Otherwise...
for (int i = 0; i < boxed_markers.size(); ++i) {
if (boxed_markers[i]->frame == marker->frame) {
insert_at = i;
break;
}
if (boxed_markers[i]->frame > marker->frame) {
insert_before = i;
break;
}
}
// Forward starts at the marker or insertion point, and goes forward.
int forward_scan_begin, forward_scan_end;
// Backward scan starts at the marker or insertion point, and goes backward.
int backward_scan_begin, backward_scan_end;
// Determine the scanning ranges.
if (insert_at == -1 && insert_before == -1) {
// Didn't find an insertion point except the end.
forward_scan_begin = forward_scan_end = 0;
backward_scan_begin = markers.size() - 1;
backward_scan_end = 0;
} else if (insert_at != -1) {
// Found existing marker; scan before and after it.
forward_scan_begin = insert_at + 1;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_at - 1;
backward_scan_end = 0;
} else {
// Didn't find existing marker but found an insertion point.
forward_scan_begin = insert_before;
forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_before - 1;
backward_scan_end = 0;
}
const int num_consecutive_needed = 2;
if (forward_scan_begin <= forward_scan_end &&
forward_scan_end - forward_scan_begin > num_consecutive_needed) {
// TODO(keir): Finish this.
}
bool predict_forward = false;
if (backward_scan_end <= backward_scan_begin) {
// TODO(keir): Add smarter handling and detection of consecutive frames!
predict_forward = true;
}
const int max_frames_to_predict_from = 20;
if (predict_forward) {
if (backward_scan_begin - backward_scan_end < num_consecutive_needed) {
// Not enough information to do a prediction.
LG << "Predicting forward impossible, not enough information";
return false;
}
LG << "Predicting forward";
int predict_begin =
std::max(backward_scan_begin - max_frames_to_predict_from, 0);
int predict_end = backward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i <= predict_end; ++i) {
previous_markers.push_back(boxed_markers[i]);
}
RunPrediction(previous_markers, marker);
return true;
} else {
if (forward_scan_end - forward_scan_begin < num_consecutive_needed) {
// Not enough information to do a prediction.
LG << "Predicting backward impossible, not enough information";
return false;
}
LG << "Predicting backward";
int predict_begin =
std::min(forward_scan_begin + max_frames_to_predict_from,
forward_scan_end);
int predict_end = forward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i >= predict_end; --i) {
previous_markers.push_back(boxed_markers[i]);
}
RunPrediction(previous_markers, marker);
return false;
}
}
} // namespace mv

View File

@@ -1,37 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
#define LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
namespace mv {
class Tracks;
struct Marker;
// Predict the position of the given marker, and update it accordingly. The
// existing position will be overwritten.
bool PredictMarkerPosition(const Tracks& tracks, Marker* marker);
} // namespace mv
#endif // LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
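As a reference for readers of this patch, a minimal usage sketch of the prediction API declared above, modeled on the tests that follow; the marker values are illustrative only.
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/tracks.h"
// Sketch: feed a few tracked positions into Tracks, then ask for a
// prediction one frame past the last measurement.
void PredictNextPosition() {
  mv::Tracks tracks;
  for (int frame = 0; frame < 5; ++frame) {
    mv::Marker marker;
    marker.clip = 0;
    marker.track = 0;
    marker.frame = frame;
    marker.center.x() = 1.0f + frame;  // Illustrative linear motion.
    marker.center.y() = 5.0f * frame;
    tracks.AddMarker(marker);
  }
  mv::Marker predicted;
  predicted.clip = 0;
  predicted.track = 0;
  predicted.frame = 5;  // The frame to predict.
  if (mv::PredictMarkerPosition(tracks, &predicted)) {
    // predicted.center now holds the filtered estimate; the patch and
    // search region are shifted from the last real marker.
  }
}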

View File

@@ -1,201 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
namespace mv {
void AddMarker(int frame, float x, float y, Tracks* tracks) {
Marker marker;
marker.clip = marker.track = 0;
marker.frame = frame;
marker.center.x() = x;
marker.center.y() = y;
marker.patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
tracks->AddMarker(marker);
}
TEST(PredictMarkerPosition, EasyLinearMotion) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
AddMarker(7, 8.0, 35.0, &tracks);
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
// Check the patch coordinates as well.
double x = 9, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
EXPECT_LT(error, 0.1);
}
TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
Tracks tracks;
AddMarker(8, 1.0, 0.0, &tracks);
AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(6, 3.0, 10.0, &tracks);
AddMarker(5, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(3, 6.0, 25.0, &tracks);
AddMarker(2, 7.0, 30.0, &tracks);
AddMarker(1, 8.0, 35.0, &tracks);
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 0;
PredictMarkerPosition(tracks, &predicted);
LG << predicted;
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
// Check the patch coordinates as well.
double x = 9.0, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
EXPECT_LT(error, 0.1);
}
TEST(PredictMarkerPosition, TwoFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
// Missing frame 7!
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
}
TEST(PredictMarkerPosition, FourFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
// Missing frames 4, 5, 6, 7.
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 2.0); // Generous error due to larger prediction window.
}
TEST(PredictMarkerPosition, MultipleGaps) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
// AddMarker(3, 4.0, 15.0, &tracks); // Note the 3-frame gap.
// AddMarker(4, 5.0, 20.0, &tracks);
// AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
// AddMarker(7, 8.0, 35.0, &tracks);
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 1.0); // Generous error due to larger prediction window.
}
TEST(PredictMarkerPosition, MarkersInRandomOrder) {
Tracks tracks;
// This is the same as the easy, except that the tracks are randomly ordered.
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(7, 8.0, 35.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
}
} // namespace mv

View File

@@ -1,57 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_QUAD_H_
#define LIBMV_AUTOTRACK_QUAD_H_
#include <Eigen/Core>
namespace mv {
template<typename T, int D>
struct Quad {
// A quad is 4 points; generally in 2D or 3D.
//
// +----------> x
// |\.
// | \.
// | z (z goes into screen)
// |
// | r0----->r1
// | ^ |
// | | . |
// | | V
// | r3<-----r2
// | \.
// | \.
// v normal goes away (right handed).
// y
//
// Each row is one of the corners coordinates; either (x, y) or (x, y, z).
Eigen::Matrix<T, 4, D> coordinates;
};
typedef Quad<float, 2> Quad2Df;
} // namespace mv
#endif // LIBMV_AUTOTRACK_QUAD_H_
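For illustration, a small sketch of how this quad type is used by the prediction code earlier in this patch: the four rows are corner coordinates, so translating a patch is a per-row addition (the include path is inferred from the guard).
#include <Eigen/Core>
#include "libmv/autotrack/quad.h"  // Path inferred from the include guard.
// Sketch: build a 2x2-unit patch around (x, y), then translate every
// corner by the same delta, as RunPrediction() does for marker patches.
void ShiftPatch() {
  const float x = 10.0f, y = 20.0f;
  mv::Quad2Df patch;
  patch.coordinates << x - 1, y - 1,
                       x + 1, y - 1,
                       x + 1, y + 1,
                       x - 1, y + 1;
  Eigen::RowVector2f delta(0.5f, -0.25f);
  for (int i = 0; i < 4; ++i) {
    patch.coordinates.row(i) += delta;  // Each row is one corner.
  }
}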

View File

@@ -1,89 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_RECONSTRUCTION_H_
#define LIBMV_AUTOTRACK_RECONSTRUCTION_H_
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
namespace mv {
using libmv::CameraIntrinsics;
using libmv::vector;
class Model;
class CameraPose {
int clip;
int frame;
int intrinsics;
Mat3 R;
Vec3 t;
};
class Point {
int track;
// The coordinates of the point. Note that not all coordinates are always
// used; for example points on a plane only use the first two coordinates.
Vec3 X;
};
// A reconstruction for a set of tracks. The indexing for clip, frame, and
// track should match that of a Tracks object, stored elsewhere.
class Reconstruction {
public:
// All methods copy their input reference or take ownership of the pointer.
void AddCameraPose(const CameraPose& pose);
int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
int AddPoint(const Point& point);
int AddModel(Model* model);
// Returns the corresponding pose or point or NULL if missing.
CameraPose* CameraPoseForFrame(int clip, int frame);
const CameraPose* CameraPoseForFrame(int clip, int frame) const;
Point* PointForTrack(int track);
const Point* PointForTrack(int track) const;
const vector<vector<CameraPose> >& camera_poses() const {
return camera_poses_;
}
private:
// Indexed by CameraPose::intrinsics. Owns the intrinsics objects.
vector<CameraIntrinsics*> camera_intrinsics_;
// Indexed by Marker::clip then by Marker::frame.
vector<vector<CameraPose> > camera_poses_;
// Indexed by Marker::track.
vector<Point> points_;
// Indexed by Marker::model_id. Owns model objects.
vector<Model*> models_;
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_RECONSTRUCTION_H_

View File

@@ -1,67 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_REGION_H_
#define LIBMV_AUTOTRACK_REGION_H_
#include "libmv/numeric/numeric.h"
namespace mv {
using libmv::Vec2f;
// A region is a bounding box within an image.
//
// +----------> x
// |
// | (min.x, min.y) (max.x, min.y)
// | +-------------------------+
// | | |
// | | |
// | | |
// | +-------------------------+
// v (min.x, max.y) (max.x, max.y)
// y
//
struct Region {
Vec2f min;
Vec2f max;
template<typename T>
void Offset(const T& offset) {
min += offset.template cast<float>();
max += offset.template cast<float>();
}
Region Rounded() const {
Region result;
result.min(0) = ceil(this->min(0));
result.min(1) = ceil(this->min(1));
result.max(0) = ceil(this->max(0));
result.max(1) = ceil(this->max(1));
return result;
}
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_REGION_H_
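A short sketch of the Region helpers above, shifting a search area by a sub-pixel delta and snapping it to integer coordinates (the include path is inferred from the guard).
#include "libmv/autotrack/region.h"  // Path inferred from the include guard.
// Sketch: move a search region by a sub-pixel delta, then snap it to
// integer coordinates with Rounded(), which applies ceil to all four values.
void OffsetAndRound() {
  mv::Region region;
  region.min = libmv::Vec2f(10.25f, 20.75f);
  region.max = libmv::Vec2f(42.25f, 52.75f);
  libmv::Vec2f delta(0.5f, -1.5f);
  region.Offset(delta);                  // Shifts both min and max.
  mv::Region snapped = region.Rounded();
  (void)snapped;
}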

View File

@@ -1,193 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/tracks.h"
#include <algorithm>
#include <vector>
#include <iterator>
#include "libmv/numeric/numeric.h"
namespace mv {
Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
markers_[i].track == track) {
*marker = markers_[i];
return true;
}
}
return false;
}
void Tracks::GetMarkersForTrack(int track, vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (clip == markers_[i].clip &&
track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersInFrame(int clip,
int frame,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
vector<Marker>* markers) const {
std::vector<int> image1_tracks;
std::vector<int> image2_tracks;
// Collect the tracks in each of the two images.
for (int i = 0; i < markers_.size(); ++i) {
int clip = markers_[i].clip;
int frame = markers_[i].frame;
if (clip == clip1 && frame == frame1) {
image1_tracks.push_back(markers_[i].track);
} else if (clip == clip2 && frame == frame2) {
image2_tracks.push_back(markers_[i].track);
}
}
// Intersect the two sets to find the tracks of interest.
std::sort(image1_tracks.begin(), image1_tracks.end());
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
image2_tracks.begin(), image2_tracks.end(),
std::back_inserter(intersection));
// Scan through and get the relevant tracks from the two images.
for (int i = 0; i < markers_.size(); ++i) {
// Save markers that are in either frame and are in our candidate set.
if (((markers_[i].clip == clip1 &&
markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 &&
markers_[i].frame == frame2)) &&
std::binary_search(intersection.begin(),
intersection.end(),
markers_[i].track)) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::AddMarker(const Marker& marker) {
// TODO(keir): This is quadratic for repeated insertions. Fix this by adding
// a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == marker.clip &&
markers_[i].frame == marker.frame &&
markers_[i].track == marker.track) {
markers_[i] = marker;
return;
}
}
markers_.push_back(marker);
}
void Tracks::SetMarkers(vector<Marker>* markers) {
std::swap(markers_, *markers);
}
bool Tracks::RemoveMarker(int clip, int frame, int track) {
int size = markers_.size();
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
markers_[i].track == track) {
markers_[i] = markers_[size - 1];
markers_.resize(size - 1);
return true;
}
}
return false;
}
void Tracks::RemoveMarkersForTrack(int track) {
int size = 0;
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].track != track) {
markers_[size++] = markers_[i];
}
}
markers_.resize(size);
}
int Tracks::MaxClip() const {
int max_clip = 0;
for (int i = 0; i < markers_.size(); ++i) {
max_clip = std::max(markers_[i].clip, max_clip);
}
return max_clip;
}
int Tracks::MaxFrame(int clip) const {
int max_frame = 0;
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip) {
max_frame = std::max(markers_[i].frame, max_frame);
}
}
return max_frame;
}
int Tracks::MaxTrack() const {
int max_track = 0;
for (int i = 0; i < markers_.size(); ++i) {
max_track = std::max(markers_[i].track, max_track);
}
return max_track;
}
int Tracks::NumMarkers() const {
return markers_.size();
}
} // namespace mv

View File

@@ -1,82 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef LIBMV_AUTOTRACK_TRACKS_H_
#define LIBMV_AUTOTRACK_TRACKS_H_
#include "libmv/base/vector.h"
#include "libmv/autotrack/marker.h"
namespace mv {
using libmv::vector;
// The Tracks container stores correspondences between frames.
class Tracks {
public:
Tracks() { }
Tracks(const Tracks &other);
// Create a tracks object with markers already initialized. Copies markers.
explicit Tracks(const vector<Marker>& markers);
// All getters append to the output argument vector.
bool GetMarker(int clip, int frame, int track, Marker* marker) const;
void GetMarkersForTrack(int track, vector<Marker>* markers) const;
void GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const;
void GetMarkersInFrame(int clip, int frame, vector<Marker>* markers) const;
// Get the markers in frame1 and frame2 which have a common track.
//
// This is not the same as the union of the markers in frame1 and
// frame2; each marker is for a track that appears in both images.
void GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
vector<Marker>* markers) const;
void AddMarker(const Marker& marker);
// Moves the contents of *markers on top of the existing markers. This
// destroys *markers in the process (but avoids copies).
void SetMarkers(vector<Marker>* markers);
bool RemoveMarker(int clip, int frame, int track);
void RemoveMarkersForTrack(int track);
int MaxClip() const;
int MaxFrame(int clip) const;
int MaxTrack() const;
int NumMarkers() const;
const vector<Marker>& markers() const { return markers_; }
private:
vector<Marker> markers_;
// TODO(keir): Consider adding access-map data structures to avoid all the
// linear lookup penalties for the accessors.
};
} // namespace mv
#endif // LIBMV_AUTOTRACK_TRACKS_H_
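To illustrate the GetMarkersForTracksInBothImages() semantics described above, a minimal sketch with two frames that share a single track:
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
// Sketch: two frames of the same clip; only track 1 appears in both,
// so the query returns exactly its two markers.
void CommonTracks() {
  mv::Tracks tracks;
  mv::Marker marker;
  marker.clip = 0;
  marker.frame = 0; marker.track = 1; tracks.AddMarker(marker);
  marker.frame = 0; marker.track = 2; tracks.AddMarker(marker);
  marker.frame = 1; marker.track = 1; tracks.AddMarker(marker);
  marker.frame = 1; marker.track = 3; tracks.AddMarker(marker);
  libmv::vector<mv::Marker> common;
  tracks.GetMarkersForTracksInBothImages(0, 0, 0, 1, &common);
  // common.size() == 2: track 1 in frame 0 and track 1 in frame 1.
}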

View File

@@ -1,52 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/tracks.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
namespace mv {
TEST(Tracks, MaxFrame) {
Marker marker;
Tracks tracks;
// Add some markers to clip 0.
marker.clip = 0;
marker.frame = 1;
tracks.AddMarker(marker);
// Add some markers to clip 1.
marker.clip = 1;
marker.frame = 1;
tracks.AddMarker(marker);
marker.clip = 1;
marker.frame = 12;
tracks.AddMarker(marker);
EXPECT_EQ(1, tracks.MaxFrame(0));
EXPECT_EQ(12, tracks.MaxFrame(1));
}
} // namespace mv

View File

@@ -1,74 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/base/aligned_malloc.h"
#include "libmv/logging/logging.h"
#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
// Needed for memalign on Linux and _aligned_malloc on Windows.
# ifdef FREE_WINDOWS
/* make sure _aligned_malloc is included */
# ifdef __MSVCRT_VERSION__
# undef __MSVCRT_VERSION__
# endif
# define __MSVCRT_VERSION__ 0x0700
# endif // FREE_WINDOWS
# include <malloc.h>
#else
// Apple's malloc is 16-byte aligned, and does not have malloc.h, so include
// stdlib instead.
# include <cstdlib>
#endif
namespace libmv {
void *aligned_malloc(int size, int alignment) {
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif __APPLE__
// On Mac OS X, both the heap and the stack are guaranteed 16-byte aligned so
// they work natively with SSE types with no further work.
CHECK_EQ(alignment, 16);
return malloc(size);
#elif defined(__FreeBSD__) || defined(__NetBSD__)
void *result;
if (posix_memalign(&result, alignment, size)) {
// non-zero means allocation error
// either no allocation or bad alignment value
return NULL;
}
return result;
#else // This is for Linux.
return memalign(alignment, size);
#endif
}
void aligned_free(void *ptr) {
#ifdef _WIN32
_aligned_free(ptr);
#else
free(ptr);
#endif
}
} // namespace libmv

View File

@@ -1,34 +0,0 @@
// Copyright (c) 2014 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_ALIGNED_MALLOC_H_
#define LIBMV_BASE_ALIGNED_MALLOC_H_
namespace libmv {
// Allocate a block of size bytes, aligned to at least the given alignment.
void *aligned_malloc(int size, int alignment);
// Free memory allocated by aligned_malloc.
void aligned_free(void *ptr);
} // namespace libmv
#endif // LIBMV_BASE_ALIGNED_MALLOC_H_
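A minimal usage sketch for the allocator pair declared above; 16 is the portable alignment value here, since the OS X branch of the implementation only accepts that value.
#include "libmv/base/aligned_malloc.h"
// Sketch: allocate a 16-byte aligned buffer suitable for SSE access.
void UseAlignedBuffer() {
  const int kFloats = 64;
  float *buffer = static_cast<float *>(
      libmv::aligned_malloc(kFloats * static_cast<int>(sizeof(float)), 16));
  if (buffer != NULL) {
    buffer[0] = 1.0f;
    libmv::aligned_free(buffer);  // Must pair with aligned_malloc().
  }
}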

View File

@@ -1,37 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_ID_GENERATOR_H
#define LIBMV_ID_GENERATOR_H
namespace libmv {
template <typename ID>
class IdGenerator {
public:
IdGenerator() : next_(0) {}
ID Generate() { return next_++; }
private:
ID next_;
};
} // namespace libmv
#endif // LIBMV_ID_GENERATOR_H

View File

@@ -1,105 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_SCOPED_PTR_H
#define LIBMV_BASE_SCOPED_PTR_H
#include <cassert>
#include <cstddef>
namespace libmv {
/**
* A handle for a heap-allocated resource that should be freed when it goes out
* of scope. This looks similar to the one found in TR1.
*/
template<typename T>
class scoped_ptr {
public:
scoped_ptr(T *resource) : resource_(resource) {}
~scoped_ptr() { reset(0); }
T *get() const { return resource_; }
T *operator->() const { return resource_; }
T &operator*() const { return *resource_; }
void reset(T *new_resource) {
if (sizeof(T)) {
delete resource_;
}
resource_ = new_resource;
}
T *release() {
T *released_resource = resource_;
resource_ = 0;
return released_resource;
}
private:
// No copying allowed.
T *resource_;
};
// Same as scoped_ptr, but the caller must allocate the data with new[],
// and the destructor will free the memory using delete[].
template<typename T>
class scoped_array {
public:
scoped_array(T *array) : array_(array) {}
~scoped_array() { reset(NULL); }
T *get() const { return array_; }
T& operator[](std::ptrdiff_t i) const {
assert(i >= 0);
assert(array_ != NULL);
return array_[i];
}
void reset(T *new_array) {
if (sizeof(T)) {
delete [] array_;
}
array_ = new_array;
}
T *release() {
T *released_array = array_;
array_ = NULL;
return released_array;
}
private:
T *array_;
// Forbid comparison of different scoped_array types.
template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
// Disallow evil constructors
scoped_array(const scoped_array&);
void operator=(const scoped_array&);
};
} // namespace libmv
#endif // LIBMV_BASE_SCOPED_PTR_H
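Since the test below only exercises scoped_ptr, here is a small sketch of scoped_array, which pairs with new[] as described above:
#include "libmv/base/scoped_ptr.h"
// Sketch: the buffer is released when the handle goes out of scope,
// and release() hands ownership back to the caller if needed.
void UseScopedArray() {
  libmv::scoped_array<float> buffer(new float[16]);
  buffer[0] = 1.0f;            // operator[] asserts a non-null array
                               // and a non-negative index.
  float *raw = buffer.get();   // Borrow without transferring ownership.
  (void)raw;
}                              // Buffer freed here.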

View File

@@ -1,79 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/base/scoped_ptr.h"
#include "testing/testing.h"
namespace libmv {
namespace {
struct FreeMe {
FreeMe(int *freed) : freed(freed) {}
~FreeMe() { (*freed)++; }
int *freed;
};
TEST(ScopedPtr, NullDoesNothing) {
scoped_ptr<FreeMe> x(NULL);
// Does nothing.
}
TEST(ScopedPtr, FreesWhenOutOfScope) {
int frees = 0;
{
scoped_ptr<FreeMe> scoped(new FreeMe(&frees));
EXPECT_EQ(0, frees);
}
EXPECT_EQ(1, frees);
}
TEST(ScopedPtr, Operators) {
int tag = 123;
scoped_ptr<FreeMe> scoped(new FreeMe(&tag));
EXPECT_EQ(123, *(scoped->freed));
EXPECT_EQ(123, *((*scoped).freed));
}
TEST(ScopedPtr, Reset) {
int frees = 0;
scoped_ptr<FreeMe> scoped(new FreeMe(&frees));
EXPECT_EQ(0, frees);
scoped.reset(new FreeMe(&frees));
EXPECT_EQ(1, frees);
}
TEST(ScopedPtr, ReleaseAndGet) {
int frees = 0;
FreeMe *allocated = new FreeMe(&frees);
FreeMe *released = NULL;
{
scoped_ptr<FreeMe> scoped(allocated);
EXPECT_EQ(0, frees);
EXPECT_EQ(allocated, scoped.get());
released = scoped.release();
EXPECT_EQ(0, frees);
EXPECT_EQ(released, allocated);
}
EXPECT_EQ(0, frees);
delete released;
}
} // namespace
} // namespace libmv

View File

@@ -1,176 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Get an aligned vector implementation. Must be included before <vector>. The
// Eigen guys went through some trouble to make a portable override for the
// fixed size vector types.
#ifndef LIBMV_BASE_VECTOR_H
#define LIBMV_BASE_VECTOR_H
#include <cstring>
#include <new>
#include <Eigen/Core>
namespace libmv {
// A simple container class, which guarantees 16 byte alignment needed for most
// vectorization. Don't use this container for classes that cannot be copied
// via memcpy.
// FIXME: this class has some issues:
// - doesn't support iterators.
// - impedes compatibility with code using the STL.
// - the STL already provides support for custom allocators;
// it could be replaced with a simple
// template <T> class vector : std::vector<T, aligned_allocator> {} declaration
// provided it doesn't break code relying on libmv::vector specific behavior
template <typename T,
typename Allocator = Eigen::aligned_allocator<T> >
class vector {
public:
~vector() { clear(); }
vector() { init(); }
vector(int size) { init(); resize(size); }
vector(int size, const T & val) {
init();
resize(size);
std::fill(data_, data_+size_, val); }
// Copy constructor and assignment.
vector(const vector<T, Allocator> &rhs) {
init();
copy(rhs);
}
vector<T, Allocator> &operator=(const vector<T, Allocator> &rhs) {
if (&rhs != this) {
copy(rhs);
}
return *this;
}
/// Swaps the contents of two vectors in constant time.
void swap(vector<T, Allocator> &other) {
std::swap(allocator_, other.allocator_);
std::swap(size_, other.size_);
std::swap(capacity_, other.capacity_);
std::swap(data_, other.data_);
}
T *data() const { return data_; }
int size() const { return size_; }
int capacity() const { return capacity_; }
const T& back() const { return data_[size_ - 1]; }
T& back() { return data_[size_ - 1]; }
const T& front() const { return data_[0]; }
T& front() { return data_[0]; }
const T& operator[](int n) const { return data_[n]; }
T& operator[](int n) { return data_[n]; }
const T& at(int n) const { return data_[n]; }
T& at(int n) { return data_[n]; }
const T * begin() const { return data_; }
const T * end() const { return data_+size_; }
T * begin() { return data_; }
T * end() { return data_+size_; }
void resize(size_t size) {
reserve(size);
if (size > size_) {
construct(size_, size);
} else if (size < size_) {
destruct(size, size_);
}
size_ = size;
}
void push_back(const T &value) {
if (size_ == capacity_) {
reserve(size_ ? 2 * size_ : 1);
}
new (&data_[size_++]) T(value);
}
void pop_back() {
resize(size_ - 1);
}
void clear() {
destruct(0, size_);
deallocate();
init();
}
void reserve(unsigned int size) {
if (size > size_) {
T *data = static_cast<T *>(allocate(size));
memcpy(data, data_, sizeof(*data)*size_);
allocator_.deallocate(data_, capacity_);
data_ = data;
capacity_ = size;
}
}
bool empty() {
return size_ == 0;
}
private:
void construct(int start, int end) {
for (int i = start; i < end; ++i) {
new (&data_[i]) T;
}
}
void destruct(int start, int end) {
for (int i = start; i < end; ++i) {
data_[i].~T();
}
}
void init() {
size_ = 0;
data_ = 0;
capacity_ = 0;
}
void *allocate(int size) {
return size ? allocator_.allocate(size) : 0;
}
void deallocate() {
allocator_.deallocate(data_, size_);
data_ = 0;
}
void copy(const vector<T, Allocator> &rhs) {
resize(rhs.size());
for (int i = 0; i < rhs.size(); ++i) {
(*this)[i] = rhs[i];
}
}
Allocator allocator_;
size_t size_;
size_t capacity_;
T *data_;
};
} // namespace libmv
#endif // LIBMV_BASE_VECTOR_H
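The FIXME above suggests replacing this container with std::vector plus Eigen's aligned allocator; purely for comparison (not part of this patch), a minimal sketch of that alternative:
#include <vector>
#include <Eigen/Core>
// Sketch of the replacement suggested in the FIXME: a std::vector that
// keeps fixed-size vectorizable Eigen types correctly aligned.
typedef std::vector<Eigen::Vector2d,
                    Eigen::aligned_allocator<Eigen::Vector2d> > AlignedVec2Vector;
void PushAligned() {
  AlignedVec2Vector points;
  points.push_back(Eigen::Vector2d(1.0, 2.0));  // Storage is aligned for SSE.
}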

View File

@@ -1,223 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include <algorithm>
namespace {
using namespace libmv;
// This uses a Vec2d which is a fixed-size vectorizable Eigen type. It is
// necessary to test vectorizable types to ensure that the alignment asserts
// trigger if the alignment is not correct.
TEST(VectorAlignmentTest, PushBack) {
Vec2 x1, x2;
x1 << 1, 2;
x2 << 3, 4;
vector<Vec2> vs;
vs.push_back(x1);
EXPECT_EQ(1, vs.size());
EXPECT_EQ(1, vs.capacity());
vs.push_back(x2);
EXPECT_EQ(2, vs.size());
EXPECT_EQ(2, vs.capacity());
// The following is necessary because of some bug in gtest; the expected
// parameter can't be a fixed size vectorizable type with alignment
// requirements.
Vec x1r = x1;
Vec x2r = x2;
EXPECT_EQ(x1r, vs[0]);
EXPECT_EQ(x2r, vs[1]);
vs.push_back(x2);
vs.push_back(x2);
vs.push_back(x2);
EXPECT_EQ(5, vs.size());
EXPECT_EQ(8, vs.capacity());
}
// Count the number of destruct calls to test that the destructor gets called.
int foo_construct_calls = 0;
int foo_destruct_calls = 0;
struct Foo {
public:
Foo() : value(5) { foo_construct_calls++; }
~Foo() { foo_destruct_calls++; }
int value;
};
struct VectorTest : public testing::Test {
VectorTest() {
foo_construct_calls = 0;
foo_destruct_calls = 0;
}
};
TEST_F(VectorTest, EmptyVectorDoesNotConstruct) {
{
vector<Foo> v;
EXPECT_EQ(0, v.size());
EXPECT_EQ(0, v.capacity());
}
EXPECT_EQ(0, foo_construct_calls);
EXPECT_EQ(0, foo_destruct_calls);
}
TEST_F(VectorTest, DestructorGetsCalled) {
{
vector<Foo> v;
v.resize(5);
}
EXPECT_EQ(5, foo_construct_calls);
EXPECT_EQ(5, foo_destruct_calls);
}
TEST_F(VectorTest, ReserveDoesNotCallConstructorsOrDestructors) {
vector<Foo> v;
EXPECT_EQ(0, v.size());
EXPECT_EQ(0, v.capacity());
EXPECT_EQ(0, foo_construct_calls);
EXPECT_EQ(0, foo_destruct_calls);
v.reserve(5);
EXPECT_EQ(0, v.size());
EXPECT_EQ(5, v.capacity());
EXPECT_EQ(0, foo_construct_calls);
EXPECT_EQ(0, foo_destruct_calls);
}
TEST_F(VectorTest, ResizeConstructsAndDestructsAsExpected) {
vector<Foo> v;
// Create one object.
v.resize(1);
EXPECT_EQ(1, v.size());
EXPECT_EQ(1, v.capacity());
EXPECT_EQ(1, foo_construct_calls);
EXPECT_EQ(0, foo_destruct_calls);
EXPECT_EQ(5, v[0].value);
// Create two more.
v.resize(3);
EXPECT_EQ(3, v.size());
EXPECT_EQ(3, v.capacity());
EXPECT_EQ(3, foo_construct_calls);
EXPECT_EQ(0, foo_destruct_calls);
// Delete the last one.
v.resize(2);
EXPECT_EQ(2, v.size());
EXPECT_EQ(3, v.capacity());
EXPECT_EQ(3, foo_construct_calls);
EXPECT_EQ(1, foo_destruct_calls);
// Delete the remaining two.
v.resize(0);
EXPECT_EQ(0, v.size());
EXPECT_EQ(3, v.capacity());
EXPECT_EQ(3, foo_construct_calls);
EXPECT_EQ(3, foo_destruct_calls);
}
TEST_F(VectorTest, PushPopBack) {
vector<Foo> v;
Foo foo;
foo.value = 10;
v.push_back(foo);
EXPECT_EQ(1, v.size());
EXPECT_EQ(10, v.back().value);
v.pop_back();
EXPECT_EQ(0, v.size());
EXPECT_EQ(1, foo_construct_calls);
EXPECT_EQ(1, foo_destruct_calls);
}
TEST_F(VectorTest, CopyConstructor) {
vector<int> a;
a.push_back(1);
a.push_back(5);
a.push_back(3);
vector<int> b(a);
EXPECT_EQ(a.size(), b.size());
//EXPECT_EQ(a.capacity(), b.capacity());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
}
TEST_F(VectorTest, OperatorEquals) {
vector<int> a, b;
a.push_back(1);
a.push_back(5);
a.push_back(3);
b = a;
EXPECT_EQ(a.size(), b.size());
//EXPECT_EQ(a.capacity(), b.capacity());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
}
TEST_F(VectorTest, STLFind) {
vector<int> a;
a.push_back(1);
a.push_back(5);
a.push_back(3);
// Find returns an int *
EXPECT_EQ(std::find(&a[0], &a[2], 1) == &a[0], true);
EXPECT_EQ(std::find(&a[0], &a[2], 5) == &a[1], true);
EXPECT_EQ(std::find(&a[0], &a[2], 3) == &a[2], true);
// Find returns a const int *
EXPECT_EQ(std::find(a.begin(), a.end(), 1) == &a[0], true);
EXPECT_EQ(std::find(a.begin(), a.end(), 5) == &a[1], true);
EXPECT_EQ(std::find(a.begin(), a.end(), 3) == &a[2], true);
// Search for values that are not in the vector
EXPECT_EQ(std::find(a.begin(), a.end(), 0) == a.end(), true);
EXPECT_EQ(std::find(a.begin(), a.end(), 52) == a.end(), true);
}
TEST(Vector, swap) {
vector<int> a, b;
a.push_back(1);
a.push_back(2);
b.push_back(3);
a.swap(b);
EXPECT_EQ(1, a.size());
EXPECT_EQ(3, a[0]);
EXPECT_EQ(2, b.size());
EXPECT_EQ(1, b[0]);
EXPECT_EQ(2, b[1]);
}
} // namespace

View File

@@ -1,34 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_VECTOR_UTILS_H_
#define LIBMV_BASE_VECTOR_UTILS_H_
/// Delete the contents of a container.
template <class Array>
void DeleteElements(Array *array) {
for (int i = 0; i < array->size(); ++i) {
delete (*array)[i];
}
array->clear();
}
#endif // LIBMV_BASE_VECTOR_UTILS_H_
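A small sketch of DeleteElements() with the libmv::vector container from this library; any container exposing size(), operator[] and clear() works (the vector_utils.h include path is inferred from the guard).
#include "libmv/base/vector.h"
#include "libmv/base/vector_utils.h"  // Path inferred from the include guard.
// Sketch: DeleteElements() deletes every pointer in the container and then
// clears it.
void FreeOwnedPointers() {
  libmv::vector<int *> owned;
  owned.push_back(new int(1));
  owned.push_back(new int(2));
  DeleteElements(&owned);  // owned is now empty and the ints are freed.
}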

View File

@@ -1,108 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/image.h"
#include <iostream>
#include <cmath>
namespace libmv {
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
bool automatic_range_detection
) {
byte_array->ResizeLike(float_array);
float minval = HUGE_VAL;
float maxval = -HUGE_VAL;
if (automatic_range_detection) {
for (int i = 0; i < float_array.Height(); ++i) {
for (int j = 0; j < float_array.Width(); ++j) {
for (int k = 0; k < float_array.Depth(); ++k) {
minval = std::min(minval, float_array(i, j, k));
maxval = std::max(maxval, float_array(i, j, k));
}
}
}
} else {
minval = 0;
maxval = 1;
}
for (int i = 0; i < float_array.Height(); ++i) {
for (int j = 0; j < float_array.Width(); ++j) {
for (int k = 0; k < float_array.Depth(); ++k) {
float unscaled = (float_array(i, j, k) - minval) / (maxval - minval);
(*byte_array)(i, j, k) = (unsigned char)(255 * unscaled);
}
}
}
}
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array) {
float_array->ResizeLike(byte_array);
for (int i = 0; i < byte_array.Height(); ++i) {
for (int j = 0; j < byte_array.Width(); ++j) {
for (int k = 0; k < byte_array.Depth(); ++k) {
(*float_array)(i, j, k) = float(byte_array(i, j, k)) / 255.0f;
}
}
}
}
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2) {
assert(input.Depth() >= 3);
channel0->Resize(input.Height(), input.Width());
channel1->Resize(input.Height(), input.Width());
channel2->Resize(input.Height(), input.Width());
for (int row = 0; row < input.Height(); ++row) {
for (int column = 0; column < input.Width(); ++column) {
(*channel0)(row, column) = input(row, column, 0);
(*channel1)(row, column) = input(row, column, 1);
(*channel2)(row, column) = input(row, column, 2);
}
}
}
void PrintArray(const Array3Df &array) {
using namespace std;
printf("[\n");
for (int r = 0; r < array.Height(); ++r) {
printf("[");
for (int c = 0; c < array.Width(); ++c) {
if (array.Depth() == 1) {
printf("%11f, ", array(r, c));
} else {
printf("[");
for (int k = 0; k < array.Depth(); ++k) {
printf("%11f, ", array(r, c, k));
}
printf("],");
}
}
printf("],\n");
}
printf("]\n");
}
} // namespace libmv
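A short sketch that round-trips an image through the conversion helpers above; with automatic range detection disabled the float input is assumed to lie in [0, 1].
#include "libmv/image/image.h"
// Sketch: convert float -> byte -> float. The forward conversion scales
// [0, 1] to [0, 255]; the reverse conversion divides by 255.
void ConvertImages() {
  libmv::Array3Df float_image(4, 4);  // height, width; depth defaults to 1.
  float_image.Fill(0.5f);
  libmv::Array3Du byte_image;
  libmv::FloatArrayToScaledByteArray(float_image, &byte_image, false);
  libmv::Array3Df round_trip;
  libmv::ByteArrayToScaledFloatArray(byte_image, &round_trip);
}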

View File

@@ -1,497 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_ARRAY_ND_H
#define LIBMV_IMAGE_ARRAY_ND_H
#include <cassert>
#include <cstdio>
#include <cstring>
#include "libmv/image/tuple.h"
namespace libmv {
class BaseArray {};
/// A multidimensional array class.
template <typename T, int N>
class ArrayND : public BaseArray {
public:
typedef T Scalar;
/// Type for the multidimensional indices.
typedef Tuple<int, N> Index;
/// Create an empty array.
ArrayND() : data_(NULL), own_data_(true) { Resize(Index(0)); }
/// Create an array with the specified shape.
ArrayND(const Index &shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Create an array with the specified shape.
ArrayND(int *shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Copy constructor.
ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data_(true) {
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
}
ArrayND(int s0) : data_(NULL), own_data_(true) { Resize(s0); }
ArrayND(int s0, int s1) : data_(NULL), own_data_(true) { Resize(s0, s1); }
ArrayND(int s0, int s1, int s2) : data_(NULL), own_data_(true) {
Resize(s0, s1, s2);
}
ArrayND(T* data, int s0, int s1, int s2)
: shape_(0), strides_(0), data_(data), own_data_(false) {
Resize(s0, s1, s2);
}
/// Destructor deletes pixel data.
~ArrayND() {
if (own_data_) {
delete [] data_;
}
}
/// Assignment copies pixel data.
ArrayND &operator=(const ArrayND<T, N> &b) {
assert(this != &b);
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
return *this;
}
const Index &Shapes() const {
return shape_;
}
const Index &Strides() const {
return strides_;
}
/// Create an array of shape s.
void Resize(const Index &new_shape) {
if (data_ != NULL && shape_ == new_shape) {
// Don't bother reallocating if the shapes match.
return;
}
shape_.Reset(new_shape);
strides_(N - 1) = 1;
for (int i = N - 1; i > 0; --i) {
strides_(i - 1) = strides_(i) * shape_(i);
}
if (own_data_) {
delete [] data_;
data_ = NULL;
if (Size() > 0) {
data_ = new T[Size()];
}
}
}
template<typename D>
void ResizeLike(const ArrayND<D, N> &other) {
Resize(other.Shape());
}
/// Resizes the array to shape s. All data is lost.
void Resize(const int *new_shape_array) {
Resize(Index(new_shape_array));
}
/// Resize a 1D array to length s0.
void Resize(int s0) {
assert(N == 1);
int shape[] = {s0};
Resize(shape);
}
/// Resize a 2D array to shape (s0,s1).
void Resize(int s0, int s1) {
int shape[N] = {s0, s1};
for (int i = 2; i < N; ++i) {
shape[i] = 1;
}
Resize(shape);
}
// Match Eigen2's API.
void resize(int rows, int cols) {
Resize(rows, cols);
}
/// Resize a 3D array to shape (s0,s1,s2).
void Resize(int s0, int s1, int s2) {
assert(N == 3);
int shape[] = {s0, s1, s2};
Resize(shape);
}
template<typename D>
void CopyFrom(const ArrayND<D, N> &other) {
ResizeLike(other);
T *data = Data();
const D *other_data = other.Data();
for (int i = 0; i < Size(); ++i) {
data[i] = T(other_data[i]);
}
}
void Fill(T value) {
for (int i = 0; i < Size(); ++i) {
Data()[i] = value;
}
}
// Match Eigen's API.
void fill(T value) {
for (int i = 0; i < Size(); ++i) {
Data()[i] = value;
}
}
/// Return a tuple containing the length of each axis.
const Index &Shape() const {
return shape_;
}
/// Return the length of an axis.
int Shape(int axis) const {
return shape_(axis);
}
/// Return the distance between neighboring elements along axis.
int Stride(int axis) const {
return strides_(axis);
}
/// Return the number of elements of the array.
int Size() const {
int size = 1;
for (int i = 0; i < N; ++i)
size *= Shape(i);
return size;
}
/// Return the total amount of memory used by the array.
int MemorySizeInBytes() const {
return sizeof(*this) + Size() * sizeof(T);
}
/// Pointer to the first element of the array.
T *Data() { return data_; }
/// Constant pointer to the first element of the array.
const T *Data() const { return data_; }
/// Distance between the first element and the element at position index.
int Offset(const Index &index) const {
int offset = 0;
for (int i = 0; i < N; ++i)
offset += index(i) * Stride(i);
return offset;
}
/// 1D specialization.
int Offset(int i0) const {
assert(N == 1);
return i0 * Stride(0);
}
/// 2D specialization.
int Offset(int i0, int i1) const {
assert(N == 2);
return i0 * Stride(0) + i1 * Stride(1);
}
/// 3D specialization.
int Offset(int i0, int i1, int i2) const {
assert(N == 3);
return i0 * Stride(0) + i1 * Stride(1) + i2 * Stride(2);
}
/// Return a reference to the element at position index.
T &operator()(const Index &index) {
// TODO(pau) Boundary checking in debug mode.
return *( Data() + Offset(index) );
}
/// 1D specialization.
T &operator()(int i0) {
return *( Data() + Offset(i0) );
}
/// 2D specialization.
T &operator()(int i0, int i1) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
T &operator()(int i0, int i1, int i2) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
assert(0 <= i2 && i2 < Shape(2));
return *(Data() + Offset(i0, i1, i2));
}
/// Return a constant reference to the element at position index.
const T &operator()(const Index &index) const {
return *(Data() + Offset(index));
}
/// 1D specialization.
const T &operator()(int i0) const {
return *(Data() + Offset(i0));
}
/// 2D specialization.
const T &operator()(int i0, int i1) const {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
const T &operator()(int i0, int i1, int i2) const {
return *(Data() + Offset(i0, i1, i2));
}
/// True if index is inside array.
bool Contains(const Index &index) const {
for (int i = 0; i < N; ++i)
if (index(i) < 0 || index(i) >= Shape(i))
return false;
return true;
}
/// 1D specialization.
bool Contains(int i0) const {
return 0 <= i0 && i0 < Shape(0);
}
/// 2D specialization.
bool Contains(int i0, int i1) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1);
}
/// 3D specialization.
bool Contains(int i0, int i1, int i2) const {
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1)
&& 0 <= i2 && i2 < Shape(2);
}
bool operator==(const ArrayND<T, N> &other) const {
if (shape_ != other.shape_) return false;
if (strides_ != other.strides_) return false;
for (int i = 0; i < Size(); ++i) {
if (this->Data()[i] != other.Data()[i])
return false;
}
return true;
}
bool operator!=(const ArrayND<T, N> &other) const {
return !(*this == other);
}
ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
assert(Shape() == other.Shape());
ArrayND<T, N> res;
res.ResizeLike(*this);
for (int i = 0; i < res.Size(); ++i) {
res.Data()[i] = Data()[i] * other.Data()[i];
}
return res;
}
protected:
/// The number of elements in each dimension.
Index shape_;
/// How to jump to neighbors in each dimension.
Index strides_;
/// Pointer to the first element of the array.
T *data_;
/// Flag indicating whether this array owns or merely references the data.
bool own_data_;
};
/// 3D array (row, column, channel).
template <typename T>
class Array3D : public ArrayND<T, 3> {
typedef ArrayND<T, 3> Base;
public:
Array3D()
: Base() {
}
Array3D(int height, int width, int depth = 1)
: Base(height, width, depth) {
}
Array3D(T* data, int height, int width, int depth = 1)
: Base(data, height, width, depth) {
}
void Resize(int height, int width, int depth = 1) {
Base::Resize(height, width, depth);
}
int Height() const {
return Base::Shape(0);
}
int Width() const {
return Base::Shape(1);
}
int Depth() const {
return Base::Shape(2);
}
// Match Eigen2's API so that Array3D's and Mat*'s can work together via
// template magic.
int rows() const { return Height(); }
int cols() const { return Width(); }
int depth() const { return Depth(); }
int Get_Step() const { return Width()*Depth(); }
/// Enable accessing with 2 indices for grayscale images.
T &operator()(int i0, int i1, int i2 = 0) {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
}
const T &operator()(int i0, int i1, int i2 = 0) const {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
}
};
typedef Array3D<unsigned char> Array3Du;
typedef Array3D<unsigned int> Array3Dui;
typedef Array3D<int> Array3Di;
typedef Array3D<float> Array3Df;
typedef Array3D<short> Array3Ds;
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2);
void PrintArray(const Array3Df &array);
/** Convert a float array into a byte array by scaling values into the
 * [0, 255] range using max and min, which are automatically detected
 * (if automatic_range_detection = true).
 * \note TODO: this automatic detection only works when the image contains
 * at least one pixel of both bounds.
 **/
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
bool automatic_range_detection = false);
//! Convert a byte array into a float array by dividing values by 255.
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array);
template <typename AArrayType, typename BArrayType, typename CArrayType>
void MultiplyElements(const AArrayType &a,
const BArrayType &b,
CArrayType *c) {
// This function does an element-wise multiply between
// the two Arrays A and B, and stores the result in C.
// A and B must have the same dimensions.
assert(a.Shape() == b.Shape());
c->ResizeLike(a);
// To perform the multiplication, a "current" index into the N dimensions of
// the A and B arrays specifies which elements are being multiplied.
typename CArrayType::Index index;
// The index starts at the maximum value for each dimension
const typename CArrayType::Index& cShape = c->Shape();
for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
index(i) = cShape(i) - 1;
// After each multiplication, the highest-dimensional index is reduced.
// If this reduces it below zero, it resets to its maximum value
// and decrements the index of the next lower dimension.
// This ripple-action continues until the entire new array has been
// calculated, indicated by dimension zero having a negative index.
while ( index(0) >= 0 ) {
(*c)(index) = a(index) * b(index);
int dimension = CArrayType::Index::SIZE - 1;
index(dimension) = index(dimension) - 1;
while ( dimension > 0 && index(dimension) < 0 ) {
index(dimension) = cShape(dimension) - 1;
index(dimension - 1) = index(dimension - 1) - 1;
--dimension;
}
}
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const ArrayND<TA, 3> &a,
const ArrayND<TB, 3> &b,
ArrayND<TC, 3> *c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
assert(a.Shape(1) == b.Shape(1));
assert(a.Shape(2) == b.Shape(2));
for (int i = 0; i < a.Shape(0); ++i) {
for (int j = 0; j < a.Shape(1); ++j) {
for (int k = 0; k < a.Shape(2); ++k) {
(*c)(i, j, k) = TC(a(i, j, k) * b(i, j, k));
}
}
}
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const Array3D<TA> &a,
const Array3D<TB> &b,
Array3D<TC> *c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
assert(a.Shape(1) == b.Shape(1));
assert(a.Shape(2) == b.Shape(2));
for (int i = 0; i < a.Shape(0); ++i) {
for (int j = 0; j < a.Shape(1); ++j) {
for (int k = 0; k < a.Shape(2); ++k) {
(*c)(i, j, k) = TC(a(i, j, k) * b(i, j, k));
}
}
}
}
} // namespace libmv
#endif // LIBMV_IMAGE_ARRAY_ND_H
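For orientation, here is a minimal usage sketch of the containers declared above. It is illustrative only and not part of the original sources; it simply exercises the public API (construction, Fill, element access and the element-wise MultiplyElements helper) under the include path used elsewhere in libmv.

// Illustrative sketch: basic ArrayND / Array3Df usage.
#include <cstdio>
#include "libmv/image/array_nd.h"

int main() {
  // A 2x3 single-channel float image (row, column, channel).
  libmv::Array3Df image(2, 3);
  image.Fill(1.0f);
  image(1, 2) = 4.0f;  // Write one pixel through the 2D accessor.

  // Element-wise product into a third array of the same shape.
  libmv::Array3Df weights(2, 3), weighted;
  weights.Fill(0.5f);
  libmv::MultiplyElements(image, weights, &weighted);

  std::printf("size=%d value=%f\n", weighted.Size(), weighted(1, 2));
  return 0;
}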

View File

@@ -1,324 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/array_nd.h"
#include "testing/testing.h"
using libmv::ArrayND;
using libmv::Array3D;
using libmv::Array3Df;
namespace {
TEST(ArrayND, EmptyConstructor) {
ArrayND<int, 2> a;
EXPECT_EQ(0, a.Shape(0));
EXPECT_EQ(0, a.Shape(1));
}
TEST(ArrayND, IndexConstructor) {
int s[] = {1, 2, 3};
ArrayND<int, 3>::Index shape(s);
ArrayND<int, 3> a(shape);
EXPECT_EQ(1, a.Shape(0));
EXPECT_EQ(2, a.Shape(1));
EXPECT_EQ(3, a.Shape(2));
}
TEST(ArrayND, PointerConstructor) {
int s[] = {1, 2, 3};
ArrayND<int, 3> a(s);
EXPECT_EQ(1, a.Shape(0));
EXPECT_EQ(2, a.Shape(1));
EXPECT_EQ(3, a.Shape(2));
}
TEST(ArrayND, CopyConstructor) {
int s[] = {1, 2, 3};
ArrayND<int, 3> a(s);
a(0, 1, 1) = 3;
a(0, 1, 2) = 3;
ArrayND<int, 3> b(a);
EXPECT_EQ(1, b.Shape(0));
EXPECT_EQ(2, b.Shape(1));
EXPECT_EQ(3, b.Shape(2));
EXPECT_EQ(3, b(0, 1, 1));
b(0, 1, 2) = 2;
EXPECT_EQ(3, a(0, 1, 2));
}
TEST(ArrayND, Assignation) {
int s[] = {1, 2, 3};
ArrayND<int, 3> a(s);
a(0, 1, 1) = 3;
a(0, 1, 2) = 3;
ArrayND<int, 3> b;
b = a;
EXPECT_EQ(1, b.Shape(0));
EXPECT_EQ(2, b.Shape(1));
EXPECT_EQ(3, b.Shape(2));
EXPECT_EQ(3, b(0, 1, 1));
b(0, 1, 2) = 2;
EXPECT_EQ(3, a(0, 1, 2));
}
TEST(ArrayND, Fill) {
int s[] = {2, 2};
ArrayND<int, 2> a(s);
a.Fill(42);
EXPECT_EQ(42, a(0, 0));
EXPECT_EQ(42, a(0, 1));
EXPECT_EQ(42, a(1, 0));
EXPECT_EQ(42, a(1, 1));
}
TEST(ArrayND, Size) {
int s[] = {1, 2, 3};
ArrayND<int, 3>::Index shape(s);
ArrayND<int, 3> a(shape);
int l[] = {0, 1, 2};
ArrayND<int, 3>::Index last(l);
EXPECT_EQ(a.Size(), a.Offset(last)+1);
EXPECT_TRUE(a.Contains(last));
EXPECT_FALSE(a.Contains(shape));
}
TEST(ArrayND, MemorySizeInBytes) {
int s[] = {2, 3};
ArrayND<int, 2>::Index shape(s);
ArrayND<int, 2> a(shape);
int size = 24 + sizeof(a);
EXPECT_EQ(size, a.MemorySizeInBytes());
}
TEST(ArrayND, Parenthesis) {
typedef ArrayND<int, 2>::Index Index;
int s[] = {3, 3};
ArrayND<int, 2> a(s);
*(a.Data()+0) = 0;
*(a.Data()+5) = 5;
int i1[] = {0, 0};
EXPECT_EQ(0, a(Index(i1)));
int i2[] = {1, 2};
EXPECT_EQ(5, a(Index(i2)));
}
TEST(ArrayND, 1DConstructor) {
ArrayND<int, 1> a(3);
EXPECT_EQ(3, a.Shape(0));
}
TEST(ArrayND, 2DConstructor) {
ArrayND<int, 2> a(1, 2);
EXPECT_EQ(1, a.Shape(0));
EXPECT_EQ(2, a.Shape(1));
}
TEST(ArrayND, 3DConstructor) {
ArrayND<int, 3> a(1, 2, 3);
EXPECT_EQ(1, a.Shape(0));
EXPECT_EQ(2, a.Shape(1));
EXPECT_EQ(3, a.Shape(2));
}
TEST(ArrayND, 1DAccessor) {
ArrayND<int, 1> a(3);
a(0) = 1;
a(1) = 2;
EXPECT_EQ(1, a(0));
EXPECT_EQ(2, a(1));
EXPECT_EQ(1, *(a.Data()));
EXPECT_EQ(2, *(a.Data() + a.Stride(0)));
}
TEST(ArrayND, 2DAccessor) {
ArrayND<int, 2> a(3, 3);
a(0, 0) = 1;
a(1, 1) = 2;
EXPECT_EQ(1, a(0, 0));
EXPECT_EQ(2, a(1, 1));
EXPECT_EQ(1, *(a.Data()));
EXPECT_EQ(2, *(a.Data() + a.Stride(0) + a.Stride(1)));
}
TEST(ArrayND, 3DAccessor) {
ArrayND<int, 3> a(3, 3, 3);
a(0, 0, 0) = 1;
a(1, 1, 1) = 2;
EXPECT_EQ(1, a(0, 0, 0));
EXPECT_EQ(2, a(1, 1, 1));
EXPECT_EQ(1, *(a.Data()));
EXPECT_EQ(2, *(a.Data() + a.Stride(0) + a.Stride(1) + a.Stride(2)));
}
TEST(ArrayND, CopyFrom) {
ArrayND<int, 3> a(2, 2, 1);
a(0, 0, 0) = 1;
a(0, 1, 0) = 2;
a(1, 0, 0) = 3;
a(1, 1, 0) = 4;
ArrayND<float, 3> b;
b.CopyFrom(a);
EXPECT_FLOAT_EQ(1.f, b(0, 0, 0));
EXPECT_FLOAT_EQ(2.f, b(0, 1, 0));
EXPECT_FLOAT_EQ(3.f, b(1, 0, 0));
EXPECT_FLOAT_EQ(4.f, b(1, 1, 0));
}
TEST(ArrayND, MultiplyElements) {
ArrayND<int, 3> a(2, 2, 1);
a(0, 0, 0) = 1;
a(0, 1, 0) = 2;
a(1, 0, 0) = 3;
a(1, 1, 0) = 4;
ArrayND<int, 3> b(2, 2, 1);
b(0, 0, 0) = 6;
b(0, 1, 0) = 5;
b(1, 0, 0) = 4;
b(1, 1, 0) = 3;
ArrayND<int, 3> c;
MultiplyElements(a, b, &c);
EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(10, c(0, 1, 0));
EXPECT_FLOAT_EQ(12, c(1, 0, 0));
EXPECT_FLOAT_EQ(12, c(1, 1, 0));
}
TEST(ArrayND, IsEqualOperator) {
ArrayND<int, 3> a(2, 2, 1);
a(0, 0, 0) = 1;
a(0, 1, 0) = 2;
a(1, 0, 0) = 3;
a(1, 1, 0) = 4;
ArrayND<int, 3> b(2, 2, 1);
b(0, 0, 0) = 1;
b(0, 1, 0) = 2;
b(1, 0, 0) = 3;
b(1, 1, 0) = 4;
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
b(1, 1, 0) = 5;
EXPECT_TRUE(a != b);
EXPECT_FALSE(a == b);
}
TEST(Array3D, Sizes) {
Array3D<int> array;
EXPECT_EQ(array.Height(), 0);
EXPECT_EQ(array.Width(), 0);
EXPECT_EQ(array.Depth(), 0);
EXPECT_EQ(array.Shape(0), 0);
}
TEST(Array3D, CopyConstructor) {
Array3D<int> array(10, 10);
array(0, 0) = 1;
array(0, 1) = 1;
Array3D<int> copy(array);
EXPECT_EQ(copy.Height(), 10);
EXPECT_EQ(copy.Width(), 10);
EXPECT_EQ(copy.Depth(), 1);
EXPECT_EQ(copy(0, 0), 1);
copy(0, 1) = 2;
EXPECT_EQ(array(0, 1), 1);
}
TEST(Array3D, Assignation) {
Array3D<int> array(10, 10);
array(0, 0) = 1;
array(0, 1) = 1;
Array3D<int> copy;
copy = array;
EXPECT_EQ(copy.Height(), 10);
EXPECT_EQ(copy.Width(), 10);
EXPECT_EQ(copy.Depth(), 1);
EXPECT_EQ(copy(0, 0), 1);
copy(0, 1) = 2;
EXPECT_EQ(array(0, 1), 1);
}
TEST(Array3D, Parenthesis) {
Array3D<int> array(1, 2, 3);
array(0, 1, 0) = 3;
EXPECT_EQ(array(0, 1), 3);
}
TEST(Array3Df, SplitChannels) {
Array3Df array(1, 2, 3);
array(0, 0, 0) = 1;
array(0, 1, 0) = 1;
array(0, 0, 1) = 2;
array(0, 1, 1) = 2;
array(0, 0, 2) = 3;
array(0, 1, 2) = 3;
Array3Df c0, c1, c2;
SplitChannels(array, &c0, &c1, &c2);
for (int row = 0; row < 1; ++row) {
for (int column = 0; column < 2; ++column) {
EXPECT_EQ(array(row, column, 0), c0(row, column));
EXPECT_EQ(array(row, column, 1), c1(row, column));
EXPECT_EQ(array(row, column, 2), c2(row, column));
}
}
}
TEST(ArrayND, MultiplyElementsGeneric) {
ArrayND<double, 5> A;
ArrayND<int, 5> B;
ArrayND<double, 5> C;
int shape[] = {1, 3, 5, 7, 1};
A.Resize(shape);
B.Resize(shape);
A.Fill(1.1);
B.Fill(2);
MultiplyElements(A, B, &C);
ArrayND<double, 5>::Index cIndex;
for (int d0 = 0; d0 < shape[0]; ++d0)
for (int d1 = 0; d1 < shape[1]; ++d1)
for (int d2 = 0; d2 < shape[2]; ++d2)
for (int d3 = 0; d3 < shape[3]; ++d3)
for (int d4 = 0; d4 < shape[4]; ++d4) {
cIndex(0) = d0;
cIndex(1) = d1;
cIndex(2) = d2;
cIndex(3) = d3;
cIndex(4) = d4;
EXPECT_EQ(2.2, C(cIndex));
}
}
} // namespace

View File

@@ -1,344 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/convolve.h"
#include <cmath>
#include "libmv/image/image.h"
namespace libmv {
// Compute a Gaussian kernel and its derivative, such that you can take the
// derivative of an image by convolving with the kernel horizontally and then
// with the derivative vertically to get (e.g.) the y derivative.
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
assert(sigma >= 0.0);
// 0.004 implies a 3 pixel kernel with 1 pixel sigma.
const float truncation_factor = 0.004f;
// Calculate the kernel size based on sigma such that it is odd.
float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
int width = lround(2*precisehalfwidth);
if (width % 2 == 0) {
width++;
}
// Calculate the gaussian kernel and its derivative.
kernel->resize(width);
derivative->resize(width);
kernel->setZero();
derivative->setZero();
int halfwidth = width / 2;
for (int i = -halfwidth; i <= halfwidth; ++i) {
(*kernel)(i + halfwidth) = Gaussian(i, sigma);
(*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
}
// Since images should not get brighter or darker, normalize.
NormalizeL1(kernel);
// Normalize the derivative differently. See
// www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
double factor = 0.;
for (int i = -halfwidth; i <= halfwidth; ++i) {
factor -= i*(*derivative)(i+halfwidth);
}
*derivative /= factor;
}
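// In other words, the derivative kernel d is scaled so that
//
//   sum_i (-i) * d(i) = 1,
//
// i.e. convolving a unit-slope ramp f(x) = x with the normalized kernel
// reports a slope of exactly 1, while NormalizeL1 above makes the smoothing
// kernel sum to 1 so the blur preserves overall brightness.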
template <int size, bool vertical>
void FastConvolve(const Vec &kernel, int width, int height,
const float* src, int src_stride, int src_line_stride,
float* dst, int dst_stride) {
double coefficients[2 * size + 1];
for (int k = 0; k < 2 * size + 1; ++k) {
coefficients[k] = kernel(2 * size - k);
}
// Fast path: if the kernel has a certain size, use the constant sized loops.
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
for (int k = -size; k <= size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] * coefficients[k + size];
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] * coefficients[k + size];
}
}
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
}
}
template<bool vertical>
void Convolve(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
int width = in.Width();
int height = in.Height();
Array3Df &out = *out_pointer;
if (plane == -1) {
out.ResizeLike(in);
plane = 0;
}
assert(kernel.size() % 2 == 1);
assert(&in != out_pointer);
int src_line_stride = in.Stride(0);
int src_stride = in.Stride(1);
int dst_stride = out.Stride(1);
const float* src = in.Data();
float* dst = out.Data() + plane;
// Use a dispatch table to make most convolutions used in practice use the
// fast path.
int half_width = kernel.size() / 2;
switch (half_width) {
#define static_convolution(size) case size: \
FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
src_line_stride, dst, dst_stride); break;
static_convolution(1)
static_convolution(2)
static_convolution(3)
static_convolution(4)
static_convolution(5)
static_convolution(6)
static_convolution(7)
#undef static_convolution
default:
int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
// Slow path: this loop cannot be unrolled.
for (int k = -dynamic_size; k <= dynamic_size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
}
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
}
}
}
void ConvolveHorizontal(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
Convolve<false>(in, kernel, out_pointer, plane);
}
void ConvolveVertical(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
Convolve<true>(in, kernel, out_pointer, plane);
}
void ConvolveGaussian(const Array3Df &in,
double sigma,
Array3Df *out_pointer) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
ConvolveVertical(in, kernel, &tmp);
ConvolveHorizontal(tmp, kernel, out_pointer);
}
void ImageDerivatives(const Array3Df &in,
double sigma,
Array3Df *gradient_x,
Array3Df *gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
// Compute first derivative in x.
ConvolveVertical(in, kernel, &tmp);
ConvolveHorizontal(tmp, derivative, gradient_x);
// Compute first derivative in y.
ConvolveHorizontal(in, kernel, &tmp);
ConvolveVertical(tmp, derivative, gradient_y);
}
void BlurredImageAndDerivatives(const Array3Df &in,
double sigma,
Array3Df *blurred_image,
Array3Df *gradient_x,
Array3Df *gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
// Compute convolved image.
ConvolveVertical(in, kernel, &tmp);
ConvolveHorizontal(tmp, kernel, blurred_image);
// Compute first derivative in x (reusing vertical convolution above).
ConvolveHorizontal(tmp, derivative, gradient_x);
// Compute first derivative in y.
ConvolveHorizontal(in, kernel, &tmp);
ConvolveVertical(tmp, derivative, gradient_y);
}
// Compute the gaussian blur of an image and the derivatives of the blurred
// image, and store the results in three channels. Since the blurred value and
// gradients are closer in memory, this leads to better performance if all
// three values are needed at the same time.
void BlurredImageAndDerivativesChannels(const Array3Df &in,
double sigma,
Array3Df *blurred_and_gradxy) {
assert(in.Depth() == 1);
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
// Compute convolved image.
Array3Df tmp;
ConvolveVertical(in, kernel, &tmp);
blurred_and_gradxy->Resize(in.Height(), in.Width(), 3);
ConvolveHorizontal(tmp, kernel, blurred_and_gradxy, 0);
// Compute first derivative in x.
ConvolveHorizontal(tmp, derivative, blurred_and_gradxy, 1);
// Compute first derivative in y.
ConvolveHorizontal(in, kernel, &tmp);
ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
}
void BoxFilterHorizontal(const Array3Df &in,
int window_size,
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
for (int k = 0; k < in.Depth(); ++k) {
for (int i = 0; i < in.Height(); ++i) {
float sum = 0;
// Init sum.
for (int j = 0; j < half_width; ++j) {
sum += in(i, j, k);
}
// Fill left border.
for (int j = 0; j < half_width + 1; ++j) {
sum += in(i, j + half_width, k);
out(i, j, k) = sum;
}
// Fill interior.
for (int j = half_width + 1; j < in.Width()-half_width; ++j) {
sum -= in(i, j - half_width - 1, k);
sum += in(i, j + half_width, k);
out(i, j, k) = sum;
}
// Fill right border.
for (int j = in.Width() - half_width; j < in.Width(); ++j) {
sum -= in(i, j - half_width - 1, k);
out(i, j, k) = sum;
}
}
}
}
void BoxFilterVertical(const Array3Df &in,
int window_size,
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
for (int k = 0; k < in.Depth(); ++k) {
for (int j = 0; j < in.Width(); ++j) {
float sum = 0;
// Init sum.
for (int i = 0; i < half_width; ++i) {
sum += in(i, j, k);
}
// Fill top border.
for (int i = 0; i < half_width + 1; ++i) {
sum += in(i + half_width, j, k);
out(i, j, k) = sum;
}
// Fill interior.
for (int i = half_width + 1; i < in.Height()-half_width; ++i) {
sum -= in(i - half_width - 1, j, k);
sum += in(i + half_width, j, k);
out(i, j, k) = sum;
}
// Fill bottom border.
for (int i = in.Height() - half_width; i < in.Height(); ++i) {
sum -= in(i - half_width - 1, j, k);
out(i, j, k) = sum;
}
}
}
}
void BoxFilter(const Array3Df &in,
int box_width,
Array3Df *out) {
Array3Df tmp;
BoxFilterHorizontal(in, box_width, &tmp);
BoxFilterVertical(tmp, box_width, out);
}
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
int width,
int height,
int strength) {
for (int y = 1; y < height-1; y++)
for (int x = 1; x < width-1; x++) {
const unsigned char* s = &src[y*width+x];
int l = 128 +
s[-width-1] + s[-width] + s[-width+1] +
s[-1] - 8*s[0] + s[1] +
s[ width-1] + s[ width] + s[ width+1];
int d = ((256-strength)*s[0] + strength*l) / 256;
if (d < 0) d=0;
if (d > 255) d=255;
dst[y*width+x] = d;
}
}
} // namespace libmv
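As a reading aid for LaplaceFilter above (a restatement of the code, not new behaviour): for each interior pixel s(x, y) the filter forms a Laplacian response offset by 128, so that flat regions map to mid-grey, and then blends it with the original value (up to integer rounding and clamping):

\[ l = 128 + \sum_{\substack{(i,j) \in \{-1,0,1\}^2 \\ (i,j) \neq (0,0)}} s(x+i,\,y+j) \;-\; 8\,s(x,y), \]
\[ d = \operatorname{clamp}\!\left( \frac{(256-\mathrm{strength})\,s(x,y) + \mathrm{strength}\cdot l}{256},\; 0,\; 255 \right). \]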

View File

@@ -1,107 +0,0 @@
// Copyright (c) 2007, 2008, 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_CONVOLVE_H_
#define LIBMV_IMAGE_CONVOLVE_H_
#include "libmv/image/image.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
// TODO(keir): Find a better place for these functions. gaussian.h in numeric?
// Zero mean Gaussian.
inline double Gaussian(double x, double sigma) {
return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
}
// 2D gaussian (zero mean)
// (9) in http://mathworld.wolfram.com/GaussianFunction.html
inline double Gaussian2D(double x, double y, double sigma) {
return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
}
inline double GaussianDerivative(double x, double sigma) {
return -x / sigma / sigma * Gaussian(x, sigma);
}
// Solve the inverse of the Gaussian for positive x.
inline double GaussianInversePositive(double y, double sigma) {
return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
}
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
void ConvolveHorizontal(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
int plane = -1);
void ConvolveVertical(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
int plane = -1);
void ConvolveGaussian(const FloatImage &in,
double sigma,
FloatImage *out_pointer);
void ImageDerivatives(const FloatImage &in,
double sigma,
FloatImage *gradient_x,
FloatImage *gradient_y);
void BlurredImageAndDerivatives(const FloatImage &in,
double sigma,
FloatImage *blurred_image,
FloatImage *gradient_x,
FloatImage *gradient_y);
// Blur and take the gradients of an image, storing the results inside the
// three channels of blurred_and_gradxy.
void BlurredImageAndDerivativesChannels(const FloatImage &in,
double sigma,
FloatImage *blurred_and_gradxy);
void BoxFilterHorizontal(const FloatImage &in,
int window_size,
FloatImage *out_pointer);
void BoxFilterVertical(const FloatImage &in,
int window_size,
FloatImage *out_pointer);
void BoxFilter(const FloatImage &in,
int box_width,
FloatImage *out);
/*!
Convolve \a src into \a dst with the discrete Laplacian operator.
\a src and \a dst should be \a width x \a height images.
\a strength is an interpolation coefficient (0-256) between the original image and the Laplacian.
\note Make sure the search region is filtered with the same strength as the pattern.
*/
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
int width,
int height,
int strength);
} // namespace libmv
#endif // LIBMV_IMAGE_CONVOLVE_H_

View File

@@ -1,110 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/image/convolve.h"
#include "libmv/image/image.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
namespace {
TEST(Convolve, ComputeGaussianKernel) {
Vec kernel, derivative;
ComputeGaussianKernel(1, &kernel, &derivative);
EXPECT_EQ(7, kernel.size());
// TODO(keir): Put in a more thorough test here!
}
TEST(Convolve, ConvolveGaussian) {
FloatImage im(10, 10);
im.Fill(1);
FloatImage blured;
ConvolveGaussian(im, 3, &blured);
EXPECT_NEAR(im(5, 5), 1, 1e-7);
}
TEST(Convolve, BoxFilterHorizontal) {
FloatImage im(10, 10), convolved, filtered;
im.Fill(1);
BoxFilterHorizontal(im, 3, &filtered);
Vec kernel(3);
kernel.setConstant(1.);
ConvolveHorizontal(im, kernel, &convolved);
EXPECT_EQ(filtered(5, 5), 3);
EXPECT_TRUE(filtered == convolved);
}
TEST(Convolve, BoxFilter) {
FloatImage image(5, 5), filtered;
// A single 1.0 inside a 5x5 image should expand to a 3x3 square.
image.Fill(0);
image(2, 2) = 1.0;
BoxFilter(image, 3, &filtered);
for (int j = 0; j < 5; j++) {
for (int i = 0; i < 5; i++) {
if (i == 0 || i == 4 || j == 0 || j == 4) {
EXPECT_EQ(0.0, filtered(j, i));
} else {
EXPECT_EQ(1.0, filtered(j, i));
}
}
}
}
TEST(Convolve, BlurredImageAndDerivativesChannelsFlat) {
FloatImage im(10, 10), blurred_and_derivatives;
im.Fill(1);
BlurredImageAndDerivativesChannels(im, 1.0, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 1.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
}
TEST(Convolve, BlurredImageAndDerivativesChannelsHorizontalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2*i;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
}
TEST(Convolve, BlurredImageAndDerivativesChannelsVerticalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2*j;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
}
} // namespace

View File

@@ -1,74 +0,0 @@
// Copyright (c) 2012 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_CORRELATION_H
#define LIBMV_IMAGE_CORRELATION_H
#include "libmv/logging/logging.h"
#include "libmv/image/image.h"
namespace libmv {
inline double PearsonProductMomentCorrelation(
const FloatImage &image_and_gradient1_sampled,
const FloatImage &image_and_gradient2_sampled) {
assert(image_and_gradient1_sampled.Width() ==
image_and_gradient2_sampled.Width());
assert(image_and_gradient1_sampled.Height() ==
image_and_gradient2_sampled.Height());
const int width = image_and_gradient1_sampled.Width(),
height = image_and_gradient1_sampled.Height();
double sX = 0, sY = 0, sXX = 0, sYY = 0, sXY = 0;
for (int r = 0; r < height; ++r) {
for (int c = 0; c < width; ++c) {
double x = image_and_gradient1_sampled(r, c, 0);
double y = image_and_gradient2_sampled(r, c, 0);
sX += x;
sY += y;
sXX += x * x;
sYY += y * y;
sXY += x * y;
}
}
// Normalize.
double N = width * height;
sX /= N;
sY /= N;
sXX /= N;
sYY /= N;
sXY /= N;
double var_x = sXX - sX * sX;
double var_y = sYY - sY * sY;
double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
LG << "Covariance xy: " << covariance_xy
<< ", var 1: " << var_x << ", var 2: " << var_y
<< ", correlation: " << correlation;
return correlation;
}
} // namespace libmv
#endif // LIBMV_IMAGE_CORRELATION_H
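For reference, the function above computes the standard Pearson correlation of the first channels of the two patches using biased (divide-by-N) moment estimates; in the notation of the code, with N = width * height:

\[ \operatorname{var}_x = \tfrac{1}{N}\textstyle\sum x^2 - \bigl(\tfrac{1}{N}\sum x\bigr)^2, \qquad \operatorname{cov}_{xy} = \tfrac{1}{N}\textstyle\sum xy - \bigl(\tfrac{1}{N}\sum x\bigr)\bigl(\tfrac{1}{N}\sum y\bigr), \]
\[ \text{correlation} = \frac{\operatorname{cov}_{xy}}{\sqrt{\operatorname{var}_x \operatorname{var}_y}}. \]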

View File

@@ -1,155 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_IMAGE_H
#define LIBMV_IMAGE_IMAGE_H
#include <cmath>
#include "libmv/image/array_nd.h"
namespace libmv {
typedef Array3Du ByteImage; // For backwards compatibility.
typedef Array3Df FloatImage;
// Types added only to manage special 2D arrays for feature detection.
typedef Array3Di IntImage;
typedef Array3Ds ShortImage;
// An image class that is a thin wrapper around Array3D's of various types.
// TODO(keir): Decide if we should add reference counting semantics... Maybe it
// is the best solution after all.
class Image {
public:
// Create an image from an array. The image takes ownership of the array.
Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
Image(const Image &img): array_type_(NONE), array_(NULL) {
*this = img;
}
// Underlying data type.
enum DataType {
NONE,
BYTE,
FLOAT,
INT,
SHORT,
};
// Size in bytes that the image takes in memory.
int MemorySizeInBytes() {
int size;
switch (array_type_) {
case BYTE:
size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
break;
case FLOAT:
size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
break;
case INT:
size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
break;
case SHORT:
size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
break;
default :
size = 0;
assert(0);
}
size += sizeof(*this);
return size;
}
~Image() {
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
break;
default:
assert(0);
}
}
Image& operator= (const Image& f) {
if (this != &f) {
array_type_ = f.array_type_;
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
array_ = new Array3Du(*(Array3Du *)f.array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
array_ = new Array3Df(*(Array3Df *)f.array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
array_ = new Array3Di(*(Array3Di *)f.array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
array_ = new Array3Ds(*(Array3Ds *)f.array_);
break;
default:
assert(0);
}
}
return *this;
}
Array3Du *AsArray3Du() const {
if (array_type_ == BYTE) {
return reinterpret_cast<Array3Du *>(array_);
}
return NULL;
}
Array3Df *AsArray3Df() const {
if (array_type_ == FLOAT) {
return reinterpret_cast<Array3Df *>(array_);
}
return NULL;
}
private:
DataType array_type_;
BaseArray *array_;
};
} // namespace libmv
#endif // LIBMV_IMAGE_IMAGE_H
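A short illustrative sketch of the Image wrapper above (not part of the original sources); the key point is that the constructor takes ownership of the heap-allocated array, which the destructor then frees.

// Illustrative sketch: the Image wrapper owns the array it is given.
#include "libmv/image/image.h"

int main() {
  // The Image takes ownership of this allocation; do not delete it yourself.
  libmv::Array3Df *pixels = new libmv::Array3Df(480, 640, 3);
  pixels->Fill(0.0f);

  libmv::Image image(pixels);
  libmv::FloatImage *as_float = image.AsArray3Df();  // Same pointer as pixels.
  libmv::ByteImage *as_byte = image.AsArray3Du();    // NULL: wrong type.
  (void)as_float;
  (void)as_byte;
  return 0;  // ~Image() deletes the Array3Df.
}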

View File

@@ -1,81 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_IMAGE_CONVERTER_H
#define LIBMV_IMAGE_IMAGE_CONVERTER_H
#include "libmv/image/array_nd.h"
namespace libmv {
// The factor comes from http://www.easyrgb.com/
// RGB to XYZ : Y is the luminance channel
// var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
template<typename T>
inline T RGB2GRAY(const T r, const T g, const T b) {
return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
}
/*
// Specialization for the uchar type (disabled: it does not work as intended).
template<>
inline unsigned char RGB2GRAY<unsigned char>(const unsigned char r,
const unsigned char g,
const unsigned char b) {
return (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 +0.5);
}*/
template<class ImageIn, class ImageOut>
void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
// It is fine to convert an RGBA image here as well;
// all the additional channels will simply be ignored.
assert(imaIn.Depth() >= 3);
imaOut->Resize(imaIn.Height(), imaIn.Width(), 1);
// Convert each RGB pixel into Gray value (luminance)
for (int j = 0; j < imaIn.Height(); ++j) {
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
}
}
}
// Convert given float image to an unsigned char array of pixels.
template<class Image>
unsigned char *FloatImageToUCharArray(const Image &image) {
unsigned char *buffer =
new unsigned char[image.Width() * image.Height() * image.Depth()];
for (int y = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int d = 0; d < image.Depth(); d++) {
int index = (y * image.Width() + x) * image.Depth() + d;
buffer[index] = 255.0 * image(y, x, d);
}
}
}
return buffer;
}
} // namespace libmv
#endif // LIBMV_IMAGE_IMAGE_CONVERTER_H
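A minimal usage sketch of the converters above (illustrative only; the include path for this header is assumed from its include guard, following the convention used elsewhere in these sources). Note that FloatImageToUCharArray allocates the buffer with new[], so the caller is responsible for delete[].

// Illustrative sketch: grayscale conversion and raw byte-buffer export.
#include "libmv/image/array_nd.h"
#include "libmv/image/image_converter.h"  // Path assumed from the include guard.

int main() {
  // A tiny 2x2 RGB float image with values in [0, 1].
  libmv::Array3Df rgb(2, 2, 3);
  rgb.Fill(0.25f);

  // Luminance conversion: the result is a 2x2 single-channel image.
  libmv::Array3Df gray;
  libmv::Rgb2Gray(rgb, &gray);

  // Export to a raw unsigned char buffer; ownership passes to the caller.
  unsigned char *buffer = libmv::FloatImageToUCharArray(gray);
  delete [] buffer;
  return 0;
}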

View File

@@ -1,285 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// Generic Image Processing Algorithm (GIPA)
// Use an ImageModel class that must implement the following:
//
// ::Contains(int y, int x) <= Tells whether a point is inside the image
// ::operator()(int y, int x) <= Read/write accessor for the pixel (y, x)
// ::Width()
// ::Height()
#ifndef LIBMV_IMAGE_IMAGE_DRAWING_H
#define LIBMV_IMAGE_IMAGE_DRAWING_H
namespace libmv {
/// Set the pixel at (yc, xc) to the given color, but only if the point
/// is inside the image.
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
if (!pim)
return;
if (pim->Contains(yc, xc)) {
(*pim)(yc, xc) = col;
}
}
/// Set the pixel at (yc, xc) to the given color, but only if the point
/// is inside the image. This overload supports multi-channel colors.
/// \note The color pointer col must hold as many components as the image depth.
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
if (!pim)
return;
if (pim->Contains(yc, xc)) {
for (int i = 0; i < pim->Depth(); ++i)
(*pim)(yc, xc, i) = *(col + i);
}
}
// Bresenham approach to drawing an ellipse, extended with rotation.
// http://raphaello.univ-fcomte.fr/ig/algorithme/ExemplesGLUt/BresenhamEllipse.htm
// Since the algorithm exploits 4-fold symmetry, the rotation must be applied
// to each of the 4 symmetric points.
template <class Image, class Color>
void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
const Color &col, Image *pim, double angle = 0.0) {
int a = radiusA;
int b = radiusB;
// Counter Clockwise rotation matrix.
double matXY[4] = { cos(angle), sin(angle),
-sin(angle), cos(angle)};
int x, y;
double d1, d2;
x = 0;
y = b;
d1 = b*b - a*a*b + a*a/4;
float rotX = (matXY[0] * x + matXY[1] * y);
float rotY = (matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (matXY[0] * x - matXY[1] * y);
rotY = (matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x - matXY[1] * y);
rotY = (-matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x + matXY[1] * y);
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
while (a*a*(y-.5) > b*b*(x+1)) {
if (d1 < 0) {
d1 += b*b*(2*x+3);
++x;
} else {
d1 += b*b*(2*x+3) + a*a*(-2*y+2);
++x;
--y;
}
rotX = (matXY[0] * x + matXY[1] * y);
rotY = (matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (matXY[0] * x - matXY[1] * y);
rotY = (matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x - matXY[1] * y);
rotY = (-matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x + matXY[1] * y);
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
}
d2 = b*b*(x+.5)*(x+.5) + a*a*(y-1)*(y-1) - a*a*b*b;
while (y > 0) {
if (d2 < 0) {
d2 += b*b*(2*x+2) + a*a*(-2*y+3);
--y;
++x;
} else {
d2 += a*a*(-2*y+3);
--y;
}
rotX = (matXY[0] * x + matXY[1] * y);
rotY = (matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (matXY[0] * x - matXY[1] * y);
rotY = (matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x - matXY[1] * y);
rotY = (-matXY[2] * x - matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
rotX = (-matXY[0] * x + matXY[1] * y);
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
}
}
// The Bresenham approach does not allow drawing concentric circles without
// holes, so it is better to use the Andres method.
// http://fr.wikipedia.org/wiki/Algorithme_de_tracé_de_cercle_d'Andres.
template <class Image, class Color>
void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
Image &im = *pim;
if ( im.Contains(y + radius, x + radius)
|| im.Contains(y + radius, x - radius)
|| im.Contains(y - radius, x + radius)
|| im.Contains(y - radius, x - radius)) {
int x1 = 0;
int y1 = radius;
int d = radius - 1;
while (y1 >= x1) {
// Draw the point for each octant.
safePutPixel( y1 + y, x1 + x, col, pim);
safePutPixel( x1 + y, y1 + x, col, pim);
safePutPixel( y1 + y, -x1 + x, col, pim);
safePutPixel( x1 + y, -y1 + x, col, pim);
safePutPixel(-y1 + y, x1 + x, col, pim);
safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel(-y1 + y, -x1 + x, col, pim);
safePutPixel(-x1 + y, -y1 + x, col, pim);
if (d >= 2 * x1) {
d = d - 2 * x1 - 1;
x1 += 1;
} else {
if (d <= 2 * (radius - y1)) {
d = d + 2 * y1 - 1;
y1 -= 1;
} else {
d = d + 2 * (y1 - x1 - 1);
y1 -= 1;
x1 += 1;
}
}
}
}
}
// Bresenham algorithm
template <class Image, class Color>
void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
Image &im = *pim;
// If one point is outside the image, replace it with the intersection of
// the line and the image border (either x = width or y = height).
if (!im.Contains(ya, xa) || !im.Contains(yb, xb)) {
int width = pim->Width();
int height = pim->Height();
const bool xdir = xa < xb, ydir = ya < yb;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
if (xright < 0 || xleft >= width) return;
if (xleft < 0) {
yleft -= xleft*(yright - yleft)/(xright - xleft);
xleft = 0;
}
if (xright >= width) {
yright -= (xright - width)*(yright - yleft)/(xright - xleft);
xright = width - 1;
}
if (ydown < 0 || yup >= height) return;
if (yup < 0) {
xup -= yup*(xdown - xup)/(ydown - yup);
yup = 0;
}
if (ydown >= height) {
xdown -= (ydown - height)*(xdown - xup)/(ydown - yup);
ydown = height - 1;
}
xa = (int) xleft;
xb = (int) xright;
ya = (int) yleft;
yb = (int) yright;
}
int xbas, xhaut, ybas, yhaut;
// Check the condition ybas < yhaut.
if (ya <= yb) {
xbas = xa;
ybas = ya;
xhaut = xb;
yhaut = yb;
} else {
xbas = xb;
ybas = yb;
xhaut = xa;
yhaut = ya;
}
// Initialize slope.
int x, y, dx, dy, incrmX, incrmY, dp, N, S;
dx = xhaut - xbas;
dy = yhaut - ybas;
if (dx > 0) { // If xhaut > xbas we will increment X.
incrmX = 1;
} else {
incrmX = -1; // else we will decrement X.
dx *= -1;
}
if (dy > 0) { // Positive slope will increment Y.
incrmY = 1;
} else { // Negative slope.
incrmY = -1;
}
if (dx >= dy) {
dp = 2 * dy - dx;
S = 2 * dy;
N = 2 * (dy - dx);
y = ybas;
x = xbas;
while (x != xhaut) {
safePutPixel(y, x, col, pim);
x += incrmX;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
dp += N;
y+=incrmY;
}
}
} else {
dp = 2 * dx - dy;
S = 2 * dx;
N = 2 * (dx - dy);
x = xbas;
y = ybas;
while (y < yhaut) {
safePutPixel(y, x, col, pim);
y += incrmY;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
dp += N;
x += incrmX;
}
}
}
safePutPixel(y, x, col, pim);
}
} // namespace libmv
#endif // LIBMV_IMAGE_IMAGE_DRAWING_H
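A minimal sketch of how the drawing helpers above can be used together with the byte image type from array_nd.h (illustrative only; the include path for this header is assumed from its include guard).

// Illustrative sketch: draw a line, a circle and an ellipse into a byte image.
#include <cmath>
#include "libmv/image/array_nd.h"
#include "libmv/image/image_drawing.h"  // Path assumed from the include guard.

int main() {
  libmv::Array3Du canvas(100, 100);  // Single-channel 100x100 image.
  canvas.Fill(0);
  const unsigned char white = 255;

  libmv::DrawLine(10, 10, 90, 40, white, &canvas);          // (xa, ya, xb, yb).
  libmv::DrawCircle(50, 50, 20, white, &canvas);            // (x, y, radius).
  libmv::DrawEllipse(50, 50, 30, 15, white, &canvas, 0.3);  // With rotation.
  return 0;
}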

View File

@@ -1,45 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/image/image.h"
#include "testing/testing.h"
using libmv::Image;
using libmv::Array3Df;
namespace {
TEST(Image, SimpleImageAccessors) {
Array3Df *array = new Array3Df(2, 3);
Image image(array);
EXPECT_EQ(array, image.AsArray3Df());
EXPECT_TRUE(NULL == image.AsArray3Du());
}
TEST(Image, MemorySizeInBytes) {
Array3Df *array = new Array3Df(2, 3);
Image image(array);
int size = sizeof(image) + array->MemorySizeInBytes();
EXPECT_EQ(size, image.MemorySizeInBytes());
}
} // namespace

View File

@@ -1,138 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_SAMPLE_H_
#define LIBMV_IMAGE_SAMPLE_H_
#include "libmv/image/image.h"
namespace libmv {
/// Nearest neighbor interpolation.
template<typename T>
inline T SampleNearest(const Array3D<T> &image,
float y, float x, int v = 0) {
const int i = int(round(y));
const int j = int(round(x));
return image(i, j, v);
}
inline void LinearInitAxis(float x, int size,
int *x1, int *x2,
float *dx) {
const int ix = static_cast<int>(x);
if (ix < 0) {
*x1 = 0;
*x2 = 0;
*dx = 1.0;
} else if (ix > size - 2) {
*x1 = size - 1;
*x2 = size - 1;
*dx = 1.0;
} else {
*x1 = ix;
*x2 = ix + 1;
*dx = *x2 - x;
}
}
/// Linear interpolation.
template<typename T>
inline T SampleLinear(const Array3D<T> &image, float y, float x, int v = 0) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
const T im11 = image(y1, x1, v);
const T im12 = image(y1, x2, v);
const T im21 = image(y2, x1, v);
const T im22 = image(y2, x2, v);
return T( dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
/// Linear interpolation, of all channels. The sample is assumed to have the
/// same size as the number of channels in image.
template<typename T>
inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
for (int i = 0; i < image.Depth(); ++i) {
const T im11 = image(y1, x1, i);
const T im12 = image(y1, x2, i);
const T im21 = image(y2, x1, i);
const T im22 = image(y2, x2, i);
sample[i] = T( dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
}
// Downsample all channels by 2. If the image has odd width or height, the last
// row or column is ignored.
// FIXME(MatthiasF): this implementation shouldn't be in an interface file
inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
int height = in.Height() / 2;
int width = in.Width() / 2;
int depth = in.Depth();
out->Resize(height, width, depth);
// 2x2 box filter downsampling.
for (int r = 0; r < height; ++r) {
for (int c = 0; c < width; ++c) {
for (int k = 0; k < depth; ++k) {
(*out)(r, c, k) = (in(2 * r, 2 * c, k) +
in(2 * r + 1, 2 * c, k) +
in(2 * r, 2 * c + 1, k) +
in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
}
}
}
}
// Sample a region centered at x,y in image with size extending by half_width
// from x,y. Channels specifies the number of channels to sample from.
inline void SamplePattern(const FloatImage &image,
double x, double y,
int half_width,
int channels,
FloatImage *sampled) {
sampled->Resize(2 * half_width + 1, 2 * half_width + 1, channels);
for (int r = -half_width; r <= half_width; ++r) {
for (int c = -half_width; c <= half_width; ++c) {
for (int i = 0; i < channels; ++i) {
(*sampled)(r + half_width, c + half_width, i) =
SampleLinear(image, y + r, x + c, i);
}
}
}
}
} // namespace libmv
#endif // LIBMV_IMAGE_SAMPLE_H_
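For reference, SampleLinear above implements standard bilinear interpolation: with x1, x2, y1, y2 the clamped neighbouring integer coordinates and dx = x2 - x, dy = y2 - y as computed by LinearInitAxis, the sampled value is

\[ I(y, x) \approx dy\,\bigl(dx\,I_{11} + (1-dx)\,I_{12}\bigr) + (1-dy)\,\bigl(dx\,I_{21} + (1-dx)\,I_{22}\bigr), \]

where \(I_{11} = I(y_1, x_1)\), \(I_{12} = I(y_1, x_2)\), \(I_{21} = I(y_2, x_1)\) and \(I_{22} = I(y_2, x_2)\).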

View File

@@ -1,89 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/sample.h"
#include "testing/testing.h"
using namespace libmv;
namespace {
TEST(Image, Nearest) {
Array3Du image(2, 2);
image(0, 0) = 0;
image(0, 1) = 1;
image(1, 0) = 2;
image(1, 1) = 3;
EXPECT_EQ(0, SampleNearest(image, -0.4f, -0.4f));
EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
}
TEST(Image, Linear) {
Array3Df image(2, 2);
image(0, 0) = 0;
image(0, 1) = 1;
image(1, 0) = 2;
image(1, 1) = 3;
EXPECT_EQ(1.5, SampleLinear(image, 0.5, 0.5));
}
TEST(Image, DownsampleBy2) {
Array3Df image(2, 2);
image(0, 0) = 0;
image(0, 1) = 1;
image(1, 0) = 2;
image(1, 1) = 3;
Array3Df resampled_image;
DownsampleChannelsBy2(image, &resampled_image);
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(1, resampled_image.Depth());
EXPECT_FLOAT_EQ(6./4., resampled_image(0, 0));
}
TEST(Image, DownsampleBy2MultiChannel) {
Array3Df image(2, 2, 3);
image(0, 0, 0) = 0;
image(0, 1, 0) = 1;
image(1, 0, 0) = 2;
image(1, 1, 0) = 3;
image(0, 0, 1) = 5;
image(0, 1, 1) = 6;
image(1, 0, 1) = 7;
image(1, 1, 1) = 8;
image(0, 0, 2) = 9;
image(0, 1, 2) = 10;
image(1, 0, 2) = 11;
image(1, 1, 2) = 12;
Array3Df resampled_image;
DownsampleChannelsBy2(image, &resampled_image);
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(3, resampled_image.Depth());
EXPECT_FLOAT_EQ((0+1+2+3)/4., resampled_image(0, 0, 0));
EXPECT_FLOAT_EQ((5+6+7+8)/4., resampled_image(0, 0, 1));
EXPECT_FLOAT_EQ((9+10+11+12)/4., resampled_image(0, 0, 2));
}
} // namespace

View File

@@ -1,90 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_IMAGE_TUPLE_H
#define LIBMV_IMAGE_TUPLE_H
#include <algorithm>
namespace libmv {
// A vector of elements with fixed length and deep copy semantics.
template <typename T, int N>
class Tuple {
public:
enum { SIZE = N };
Tuple() {}
Tuple(T initial_value) { Reset(initial_value); }
template <typename D>
Tuple(D *values) { Reset(values); }
template <typename D>
Tuple(const Tuple<D, N> &b) { Reset(b); }
template <typename D>
Tuple& operator=(const Tuple<D, N>& b) {
Reset(b);
return *this;
}
template <typename D>
void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
template <typename D>
void Reset(D *values) {
for (int i = 0; i < N; i++) {
data_[i] = T(values[i]);
}
}
// Set all tuple values to the same thing.
void Reset(T value) {
for (int i = 0; i < N; i++) {
data_[i] = value;
}
}
// Pointer to the first element.
T *Data() { return &data_[0]; }
const T *Data() const { return &data_[0]; }
T &operator()(int i) { return data_[i]; }
const T &operator()(int i) const { return data_[i]; }
bool operator==(const Tuple<T, N> &other) const {
for (int i = 0; i < N; ++i) {
if ((*this)(i) != other(i)) {
return false;
}
}
return true;
}
bool operator!=(const Tuple<T, N> &other) const {
return !(*this == other);
}
private:
T data_[N];
};
} // namespace libmv
#endif // LIBMV_IMAGE_TUPLE_H
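A minimal sketch of how the Tuple class above is used (not part of the diff; the include path is assumed from the surrounding files):

#include "libmv/image/tuple.h"

int main() {
  libmv::Tuple<int, 3> a(5);        // All three elements set to 5.
  int values[3] = {1, 2, 3};
  libmv::Tuple<int, 3> b(values);   // Deep copy from a raw pointer.
  a = b;                            // operator= also performs a deep copy.
  return (a == b) ? 0 : 1;          // Element-wise comparison.
}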

View File

@@ -1,83 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/image/tuple.h"
#include "testing/testing.h"
#include <algorithm>
using libmv::Tuple;
namespace {
TEST(Tuple, InitConstantValue) {
Tuple<int, 3> t(5);
EXPECT_EQ(t(0), 5);
EXPECT_EQ(t(1), 5);
EXPECT_EQ(t(2), 5);
}
TEST(Tuple, InitFromPointer) {
float vals[3] = {1.0f, 2.0f, 3.0f};
Tuple<float, 3> t(vals);
for (int i = 0; i < 3; i++)
EXPECT_EQ(t(i), vals[i]);
Tuple<int, 3> b(t);
EXPECT_EQ(b(0), int(vals[0]));
EXPECT_EQ(b(1), int(vals[1]));
EXPECT_EQ(b(2), int(vals[2]));
}
TEST(Tuple, Swap) {
unsigned char vala[3] = {1, 2, 3};
unsigned char valb[3] = {4, 5, 6};
Tuple<unsigned char, 3> a(vala);
Tuple<unsigned char, 3> b(valb);
std::swap(a, b);
EXPECT_EQ(a(0), int(valb[0]));
EXPECT_EQ(a(1), int(valb[1]));
EXPECT_EQ(a(2), int(valb[2]));
EXPECT_EQ(b(0), int(vala[0]));
EXPECT_EQ(b(1), int(vala[1]));
EXPECT_EQ(b(2), int(vala[2]));
}
TEST(Tuple, IsEqualOperator) {
Tuple<int, 3> a;
a(0) = 1;
a(1) = 2;
a(2) = 3;
Tuple<int, 3> b;
b(0) = 1;
b(1) = 2;
b(2) = 3;
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
b(1) = 5;
EXPECT_TRUE(a != b);
EXPECT_FALSE(a == b);
}
} // namespace

View File

@@ -1,31 +0,0 @@
// Copyright (c) 2007, 2008, 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_LOGGING_LOGGING_H
#define LIBMV_LOGGING_LOGGING_H
#include <glog/logging.h>
#define LG LOG(INFO)
#define V0 LOG(INFO)
#define V1 LOG(INFO)
#define V2 LOG(INFO)
#endif // LIBMV_LOGGING_LOGGING_H
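A small usage sketch for the macros above (not part of the diff); it assumes the host application initializes glog, e.g. via google::InitGoogleLogging(), before logging:

#include "libmv/logging/logging.h"

void ReportTrackCount(int num_tracks) {
  // LG is shorthand for LOG(INFO); V0..V2 currently map to the same level.
  LG << "Processing " << num_tracks << " tracks";
}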

View File

@@ -1,99 +0,0 @@
// Copyright (c) 2010 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
namespace libmv {
// HZ 4.4.4 pag.109: Point conditioning (non isotropic)
void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
double xfactor = sqrt(2.0 / variance(0));
double yfactor = sqrt(2.0 / variance(1));
// If the variance is 0.0, fall back to an identity-like scaling;
// otherwise the division by zero would produce NaN values.
if (variance(0) < 1e-8)
xfactor = mean(0) = 1.0;
if (variance(1) < 1e-8)
yfactor = mean(1) = 1.0;
*T << xfactor, 0, -xfactor * mean(0),
0, yfactor, -yfactor * mean(1),
0, 0, 1;
}
// HZ 4.4.4 pag.107: Point conditioning (isotropic)
void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
double var_norm = variance.norm();
double factor = sqrt(2.0 / var_norm);
// If the variance is 0.0, fall back to an identity-like scaling;
// otherwise the division by zero would produce NaN values.
if (var_norm < 1e-8) {
factor = 1.0;
mean.setOnes();
}
*T << factor, 0, -factor * mean(0),
0, factor, -factor * mean(1),
0, 0, 1;
}
void ApplyTransformationToPoints(const Mat &points,
const Mat3 &T,
Mat *transformed_points) {
int n = points.cols();
transformed_points->resize(2, n);
Mat3X p(3, n);
EuclideanToHomogeneous(points, &p);
p = T * p;
HomogeneousToEuclidean(p, transformed_points);
}
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T) {
PreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
void NormalizeIsotropicPoints(const Mat &points,
Mat *normalized_points,
Mat3 *T) {
IsotropicPreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
// Denormalize the results. See HZ page 109.
void UnnormalizerT::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
*H = T2.transpose() * (*H) * T1;
}
void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
*H = T2.inverse() * (*H) * T1;
}
} // namespace libmv
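As a short restatement of the non-isotropic conditioning above (added note, not in the original file), with \mu and \sigma^2 the per-row mean and variance returned by MeanAndVarianceAlongRows:

T = \begin{pmatrix}
  \sqrt{2/\sigma_x^2} & 0 & -\sqrt{2/\sigma_x^2}\,\mu_x \\
  0 & \sqrt{2/\sigma_y^2} & -\sqrt{2/\sigma_y^2}\,\mu_y \\
  0 & 0 & 1
\end{pmatrix}

Applying T maps the points to zero mean with variance 2 in each axis, which is the HZ 4.4.4 normalization that NormalizePoints() performs.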

View File

@@ -1,59 +0,0 @@
// Copyright (c) 2010 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_CONDITIONNING_H_
#define LIBMV_MULTIVIEW_CONDITIONNING_H_
#include "libmv/numeric/numeric.h"
namespace libmv {
// Point conditioning (non isotropic)
void PreconditionerFromPoints(const Mat &points, Mat3 *T);
// Point conditioning (isotropic)
void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T);
void ApplyTransformationToPoints(const Mat &points,
const Mat3 &T,
Mat *transformed_points);
void NormalizePoints(const Mat &points,
Mat *normalized_points,
Mat3 *T);
void NormalizeIsotropicPoints(const Mat &points,
Mat *normalized_points,
Mat3 *T);
/// Use inverse for unnormalize
struct UnnormalizerI {
// Denormalize the results. See HZ page 109.
static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
};
/// Use transpose for unnormalize
struct UnnormalizerT {
// Denormalize the results. See HZ page 109.
static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
};
} // namespace libmv
#endif // LIBMV_MULTIVIEW_CONDITIONNING_H_
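A minimal sketch of the intended call pattern for the API above (not part of the diff); x1 is a hypothetical 2xN matrix of image points and H a hypothetical matrix estimated from the conditioned data:

#include "libmv/multiview/conditioning.h"

void ConditioningExample(const libmv::Mat &x1) {
  libmv::Mat x1_normalized;
  libmv::Mat3 T1;
  // Condition the points before running a linear estimator.
  libmv::NormalizePoints(x1, &x1_normalized, &T1);

  libmv::Mat3 H = libmv::Mat3::Identity();  // Placeholder for an estimate.
  // Undo the conditioning on the estimated matrix; the transpose variant
  // is the one used for fundamental matrices in HZ.
  libmv::UnnormalizerT::Unnormalize(T1, T1, &H);
}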

View File

@@ -1,774 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/euclidean_resection.h"
#include <cmath>
#include <limits>
#include <Eigen/SVD>
#include <Eigen/Geometry>
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
namespace libmv {
namespace euclidean_resection {
typedef unsigned int uint;
bool EuclideanResection(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t,
ResectionMethod method) {
switch (method) {
case RESECTION_ANSAR_DANIILIDIS:
EuclideanResectionAnsarDaniilidis(x_camera, X_world, R, t);
break;
case RESECTION_EPNP:
return EuclideanResectionEPnP(x_camera, X_world, R, t);
break;
case RESECTION_PPNP:
return EuclideanResectionPPnP(x_camera, X_world, R, t);
break;
default:
LOG(FATAL) << "Unknown resection method.";
}
return false;
}
bool EuclideanResection(const Mat &x_image,
const Mat3X &X_world,
const Mat3 &K,
Mat3 *R, Vec3 *t,
ResectionMethod method) {
CHECK(x_image.rows() == 2 || x_image.rows() == 3)
<< "Invalid size for x_image: "
<< x_image.rows() << "x" << x_image.cols();
Mat2X x_camera;
if (x_image.rows() == 2) {
EuclideanToNormalizedCamera(x_image, K, &x_camera);
} else if (x_image.rows() == 3) {
HomogeneousToNormalizedCamera(x_image, K, &x_camera);
}
return EuclideanResection(x_camera, X_world, R, t, method);
}
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t) {
int num_points = X.cols();
Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
// Normalize the two point sets.
Mat3X Xn(3, num_points), Xpn(3, num_points);
for (int i = 0; i < num_points; ++i) {
Xn.col(i) = X.col(i) - C;
Xpn.col(i) = Xp.col(i) - Cp;
}
// Construct the N matrix (pg. 635).
double Sxx = Xn.row(0).dot(Xpn.row(0));
double Syy = Xn.row(1).dot(Xpn.row(1));
double Szz = Xn.row(2).dot(Xpn.row(2));
double Sxy = Xn.row(0).dot(Xpn.row(1));
double Syx = Xn.row(1).dot(Xpn.row(0));
double Sxz = Xn.row(0).dot(Xpn.row(2));
double Szx = Xn.row(2).dot(Xpn.row(0));
double Syz = Xn.row(1).dot(Xpn.row(2));
double Szy = Xn.row(2).dot(Xpn.row(1));
Mat4 N;
N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
// Find the unit quaternion q that maximizes q^T N q. It is the eigenvector
// corresponding to the largest eigenvalue.
Vec4 q = N.jacobiSvd(Eigen::ComputeFullU).matrixU().col(0);
// Retrieve the 3x3 rotation matrix.
Vec4 qq = q.array() * q.array();
double q0q1 = q(0) * q(1);
double q0q2 = q(0) * q(2);
double q0q3 = q(0) * q(3);
double q1q2 = q(1) * q(2);
double q1q3 = q(1) * q(3);
double q2q3 = q(2) * q(3);
(*R) << qq(0) + qq(1) - qq(2) - qq(3),
2 * (q1q2 - q0q3),
2 * (q1q3 + q0q2),
2 * (q1q2+ q0q3),
qq(0) - qq(1) + qq(2) - qq(3),
2 * (q2q3 - q0q1),
2 * (q1q3 - q0q2),
2 * (q2q3 + q0q1),
qq(0) - qq(1) - qq(2) + qq(3);
// Fix the handedness of the R matrix.
if (R->determinant() < 0) {
R->row(2) = -R->row(2);
}
// Compute the final translation.
*t = Cp - *R * C;
}
// Convert i and j indices of the original variables into their quadratic
// permutation single index. It follows that t_ij = t_ji.
static int IJToPointIndex(int i, int j, int num_points) {
// Always make sure that j is bigger than i. This handles t_ij = t_ji.
if (j < i) {
std::swap(i, j);
}
int idx;
int num_permutation_rows = num_points * (num_points - 1) / 2;
// All t_ii's are located at the end of the t vector after all t_ij's.
if (j == i) {
idx = num_permutation_rows + i;
} else {
int offset = (num_points - i - 1) * (num_points - i) / 2;
idx = (num_permutation_rows - offset + j - i - 1);
}
return idx;
};
// Convert i and j indices of the solution for lambda to their linear indices.
static int IJToIndex(int i, int j, int num_lambda) {
if (j < i) {
std::swap(i, j);
}
int A = num_lambda * (num_lambda + 1) / 2;
int B = num_lambda - i;
int C = B * (B + 1) / 2;
int idx = A - C + j - i;
return idx;
};
static int Sign(double value) {
return (value < 0) ? -1 : 1;
};
// Organizes a square matrix into a single row constraint on the elements of
// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
// 5.
static Vec MatrixToConstraint(const Mat &A,
int num_k_columns,
int num_lambda) {
Vec C(num_k_columns);
C.setZero();
int idx = 0;
for (int i = 0; i < num_lambda; ++i) {
for (int j = i; j < num_lambda; ++j) {
C(idx) = A(i, j);
if (i != j) {
C(idx) += A(j, i);
}
++idx;
}
}
return C;
}
// Normalizes the columns of vectors.
static void NormalizeColumnVectors(Mat3X *vectors) {
int num_columns = vectors->cols();
for (int i = 0; i < num_columns; ++i) {
vectors->col(i).normalize();
}
}
void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R,
Vec3 *t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
int num_points = x_camera.cols();
// Copy the normalized camera coords into 3 vectors and normalize them so
// that they are unit vectors from the camera center.
Mat3X x_camera_unit(3, num_points);
x_camera_unit.block(0, 0, 2, num_points) = x_camera;
x_camera_unit.row(2).setOnes();
NormalizeColumnVectors(&x_camera_unit);
int num_m_rows = num_points * (num_points - 1) / 2;
int num_tt_variables = num_points * (num_points + 1) / 2;
int num_m_columns = num_tt_variables + 1;
Mat M(num_m_columns, num_m_columns);
M.setZero();
Matu ij_index(num_tt_variables, 2);
// Create the constraint equations for the t_ij variables (7) and arrange
// them into the M matrix (8). Also store the initial (i, j) indices.
int row = 0;
for (int i = 0; i < num_points; ++i) {
for (int j = i+1; j < num_points; ++j) {
M(row, row) = -2 * x_camera_unit.col(i).dot(x_camera_unit.col(j));
M(row, num_m_rows + i) = x_camera_unit.col(i).dot(x_camera_unit.col(i));
M(row, num_m_rows + j) = x_camera_unit.col(j).dot(x_camera_unit.col(j));
Vec3 Xdiff = X_world.col(i) - X_world.col(j);
double center_to_point_distance = Xdiff.norm();
M(row, num_m_columns - 1) =
- center_to_point_distance * center_to_point_distance;
ij_index(row, 0) = i;
ij_index(row, 1) = j;
++row;
}
ij_index(i + num_m_rows, 0) = i;
ij_index(i + num_m_rows, 1) = i;
}
int num_lambda = num_points + 1; // Dimension of the null space of M.
Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
num_m_rows,
num_m_columns,
num_lambda);
// TODO(vess): The number of constraint equations in K (num_k_rows) must be
// (num_points + 1) * (num_points + 2)/2. This creates a performance issue
// for more than 4 points. It is fine for 4 points at the moment with 18
// instead of 15 equations.
int num_k_rows = num_m_rows + num_points *
(num_points*(num_points-1)/2 - num_points+1);
int num_k_columns = num_lambda * (num_lambda + 1) / 2;
Mat K(num_k_rows, num_k_columns);
K.setZero();
// Construct the first part of the K matrix corresponding to (t_ii, t_jk) for
// i != j.
int counter_k_row = 0;
for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
for (int idx2 = 0; idx2 < num_m_rows; ++idx2) {
unsigned int i = ij_index(idx1, 0);
unsigned int j = ij_index(idx2, 0);
unsigned int k = ij_index(idx2, 1);
if (i != j && i != k) {
int idx3 = IJToPointIndex(i, j, num_points);
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
}
}
}
// Construct the second part of the K matrix corresponding to (t_ii,t_jk) for
// j==k.
for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
for (int idx2 = idx1 + 1; idx2 < num_tt_variables; ++idx2) {
unsigned int i = ij_index(idx1, 0);
unsigned int j = ij_index(idx2, 0);
unsigned int k = ij_index(idx2, 1);
int idx3 = IJToPointIndex(i, j, num_points);
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
}
}
Vec L_sq = K.jacobiSvd(Eigen::ComputeFullV).matrixV().col(num_k_columns - 1);
// Pivot on the largest element for numerical stability. Afterwards recover
// the sign of the lambda solution.
double max_L_sq_value = fabs(L_sq(IJToIndex(0, 0, num_lambda)));
int max_L_sq_index = 1;
for (int i = 1; i < num_lambda; ++i) {
double abs_sq_value = fabs(L_sq(IJToIndex(i, i, num_lambda)));
if (max_L_sq_value < abs_sq_value) {
max_L_sq_value = abs_sq_value;
max_L_sq_index = i;
}
}
// Ensure positiveness of the largest value corresponding to lambda_ii.
L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
Vec L(num_lambda);
L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
max_L_sq_index,
num_lambda)));
for (int i = 0; i < num_lambda; ++i) {
if (i != max_L_sq_index) {
L(i) = L_sq(IJToIndex(max_L_sq_index, i, num_lambda)) / L(max_L_sq_index);
}
}
// Correct the scale using the fact that the last constraint is equal to 1.
L = L / (V.row(num_m_columns - 1).dot(L));
Vec X = V * L;
// Recover the distances from the camera center to the 3D points Q.
Vec d(num_points);
d.setZero();
for (int c_point = num_m_rows; c_point < num_tt_variables; ++c_point) {
d(c_point - num_m_rows) = sqrt(X(c_point));
}
// Create the 3D points in the camera system.
Mat X_cam(3, num_points);
for (int c_point = 0; c_point < num_points; ++c_point) {
X_cam.col(c_point) = d(c_point) * x_camera_unit.col(c_point);
}
// Recover the camera translation and rotation.
AbsoluteOrientation(X_world, X_cam, R, t);
}
// Selects 4 virtual control points using mean and PCA.
static void SelectControlPoints(const Mat3X &X_world,
Mat *X_centered,
Mat34 *X_control_points) {
size_t num_points = X_world.cols();
// The first virtual control point, C0, is the centroid.
Vec mean, variance;
MeanAndVarianceAlongRows(X_world, &mean, &variance);
X_control_points->col(0) = mean;
// Computes PCA
X_centered->resize(3, num_points);
for (size_t c = 0; c < num_points; c++) {
X_centered->col(c) = X_world.col(c) - mean;
}
Mat3 X_centered_sq = (*X_centered) * X_centered->transpose();
Eigen::JacobiSVD<Mat3> X_centered_sq_svd(X_centered_sq, Eigen::ComputeFullU);
Vec3 w = X_centered_sq_svd.singularValues();
Mat3 u = X_centered_sq_svd.matrixU();
for (size_t c = 0; c < 3; c++) {
double k = sqrt(w(c) / num_points);
X_control_points->col(c + 1) = mean + k * u.col(c);
}
}
// Computes the barycentric coordinates for all real points
static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
const Mat34 &X_control_points,
Mat4X *alphas) {
size_t num_points = X_world_centered.cols();
Mat3 C2;
for (size_t c = 1; c < 4; c++) {
C2.col(c-1) = X_control_points.col(c) - X_control_points.col(0);
}
Mat3 C2inv = C2.inverse();
Mat3X a = C2inv * X_world_centered;
alphas->resize(4, num_points);
alphas->setZero();
alphas->block(1, 0, 3, num_points) = a;
for (size_t c = 0; c < num_points; c++) {
(*alphas)(0, c) = 1.0 - alphas->col(c).sum();
}
}
// Estimates the coordinates of all real points in the camera coordinate frame
static void ComputePointsCoordinatesInCameraFrame(
const Mat4X &alphas,
const Vec4 &betas,
const Eigen::Matrix<double, 12, 12> &U,
Mat3X *X_camera) {
size_t num_points = alphas.cols();
// Estimates the control points in the camera reference frame.
Mat34 C2b; C2b.setZero();
for (size_t cu = 0; cu < 4; cu++) {
for (size_t c = 0; c < 4; c++) {
C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
}
}
// Estimates the 3D points in the camera reference frame
X_camera->resize(3, num_points);
for (size_t c = 0; c < num_points; c++) {
X_camera->col(c) = C2b * alphas.col(c);
}
// Check the sign of the z coordinate of the points (should be positive)
uint num_z_neg = 0;
for (size_t i = 0; i < X_camera->cols(); ++i) {
if ((*X_camera)(2, i) < 0) {
num_z_neg++;
}
}
// If more than 50% of z are negative, we change the signs
if (num_z_neg > 0.5 * X_camera->cols()) {
C2b = -C2b;
*X_camera = -(*X_camera);
}
}
bool EuclideanResectionEPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
size_t num_points = X_world.cols();
// Select the control points.
Mat34 X_control_points;
Mat X_centered;
SelectControlPoints(X_world, &X_centered, &X_control_points);
// Compute the barycentric coordinates.
Mat4X alphas(4, num_points);
ComputeBarycentricCoordinates(X_centered, X_control_points, &alphas);
// Estimates the M matrix with the barycentric coordinates
Mat M(2 * num_points, 12);
Eigen::Matrix<double, 2, 12> sub_M;
for (size_t c = 0; c < num_points; c++) {
double a0 = alphas(0, c);
double a1 = alphas(1, c);
double a2 = alphas(2, c);
double a3 = alphas(3, c);
double ui = x_camera(0, c);
double vi = x_camera(1, c);
M.block(2*c, 0, 2, 12) << a0, 0,
a0*(-ui), a1, 0,
a1*(-ui), a2, 0,
a2*(-ui), a3, 0,
a3*(-ui), 0,
a0, a0*(-vi), 0,
a1, a1*(-vi), 0,
a2, a2*(-vi), 0,
a3, a3*(-vi);
}
// TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
Eigen::JacobiSVD<Mat> MtMsvd(M.transpose()*M, Eigen::ComputeFullU);
Eigen::Matrix<double, 12, 12> u2 = MtMsvd.matrixU().transpose();
// Estimate the L matrix.
Eigen::Matrix<double, 6, 3> dv1;
Eigen::Matrix<double, 6, 3> dv2;
Eigen::Matrix<double, 6, 3> dv3;
Eigen::Matrix<double, 6, 3> dv4;
dv1.row(0) = u2.block(11, 0, 1, 3) - u2.block(11, 3, 1, 3);
dv1.row(1) = u2.block(11, 0, 1, 3) - u2.block(11, 6, 1, 3);
dv1.row(2) = u2.block(11, 0, 1, 3) - u2.block(11, 9, 1, 3);
dv1.row(3) = u2.block(11, 3, 1, 3) - u2.block(11, 6, 1, 3);
dv1.row(4) = u2.block(11, 3, 1, 3) - u2.block(11, 9, 1, 3);
dv1.row(5) = u2.block(11, 6, 1, 3) - u2.block(11, 9, 1, 3);
dv2.row(0) = u2.block(10, 0, 1, 3) - u2.block(10, 3, 1, 3);
dv2.row(1) = u2.block(10, 0, 1, 3) - u2.block(10, 6, 1, 3);
dv2.row(2) = u2.block(10, 0, 1, 3) - u2.block(10, 9, 1, 3);
dv2.row(3) = u2.block(10, 3, 1, 3) - u2.block(10, 6, 1, 3);
dv2.row(4) = u2.block(10, 3, 1, 3) - u2.block(10, 9, 1, 3);
dv2.row(5) = u2.block(10, 6, 1, 3) - u2.block(10, 9, 1, 3);
dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
Eigen::Matrix<double, 6, 10> L;
for (size_t r = 0; r < 6; r++) {
L.row(r) << dv1.row(r).dot(dv1.row(r)),
2.0 * dv1.row(r).dot(dv2.row(r)),
dv2.row(r).dot(dv2.row(r)),
2.0 * dv1.row(r).dot(dv3.row(r)),
2.0 * dv2.row(r).dot(dv3.row(r)),
dv3.row(r).dot(dv3.row(r)),
2.0 * dv1.row(r).dot(dv4.row(r)),
2.0 * dv2.row(r).dot(dv4.row(r)),
2.0 * dv3.row(r).dot(dv4.row(r)),
dv4.row(r).dot(dv4.row(r));
}
Vec6 rho;
rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
// There are three possible solutions based on the three approximations of L
// (betas). Below, each one is solved for, and then the best one is chosen.
Mat3X X_camera;
Mat3 K; K.setIdentity();
vector<Mat3> Rs(3);
vector<Vec3> ts(3);
Vec rmse(3);
// At one point this threshold was 1e-3, and caused no end of problems in
// Blender by causing frames to not resect when they would have worked fine.
// When the resect failed, the projective followup is run leading to worse
// results, and often the dreaded "flipping" where objects get flipped
// between frames. Instead, disable the check for now, always succeeding. The
// ultimate check is always reprojection error anyway.
//
// TODO(keir): Decide if setting this to infinity, effectively disabling the
// check, is the right approach. So far this seems the case.
double kSuccessThreshold = std::numeric_limits<double>::max();
// Find the first possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
// Betas_approx_1 = [b00 b01 b02 b03]
Vec4 betas = Vec4::Zero();
Eigen::Matrix<double, 6, 4> l_6x4;
for (size_t r = 0; r < 6; r++) {
l_6x4.row(r) << L(r, 0), L(r, 1), L(r, 3), L(r, 6);
}
Eigen::JacobiSVD<Mat> svd_of_l4(l_6x4,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec4 b4 = svd_of_l4.solve(rho);
if ((l_6x4 * b4).isApprox(rho, kSuccessThreshold)) {
if (b4(0) < 0) {
b4 = -b4;
}
b4(0) = std::sqrt(b4(0));
betas << b4(0), b4(1) / b4(0), b4(2) / b4(0), b4(3) / b4(0);
ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
AbsoluteOrientation(X_world, X_camera, &Rs[0], &ts[0]);
rmse(0) = RootMeanSquareError(x_camera, X_world, K, Rs[0], ts[0]);
} else {
LOG(ERROR) << "First approximation of beta not good enough.";
ts[0].setZero();
rmse(0) = std::numeric_limits<double>::max();
}
// Find the second possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
// Betas_approx_2 = [b00 b01 b11]
betas.setZero();
Eigen::Matrix<double, 6, 3> l_6x3;
l_6x3 = L.block(0, 0, 6, 3);
Eigen::JacobiSVD<Mat> svdOfL3(l_6x3,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec3 b3 = svdOfL3.solve(rho);
VLOG(2) << " rho = " << rho;
VLOG(2) << " l_6x3 * b3 = " << l_6x3 * b3;
if ((l_6x3 * b3).isApprox(rho, kSuccessThreshold)) {
if (b3(0) < 0) {
betas(0) = std::sqrt(-b3(0));
betas(1) = (b3(2) < 0) ? std::sqrt(-b3(2)) : 0;
} else {
betas(0) = std::sqrt(b3(0));
betas(1) = (b3(2) > 0) ? std::sqrt(b3(2)) : 0;
}
if (b3(1) < 0) {
betas(0) = -betas(0);
}
betas(2) = 0;
betas(3) = 0;
ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
AbsoluteOrientation(X_world, X_camera, &Rs[1], &ts[1]);
rmse(1) = RootMeanSquareError(x_camera, X_world, K, Rs[1], ts[1]);
} else {
LOG(ERROR) << "Second approximation of beta not good enough.";
ts[1].setZero();
rmse(1) = std::numeric_limits<double>::max();
}
// Find the third possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
// Betas_approx_3 = [b00 b01 b11 b02 b12]
betas.setZero();
Eigen::Matrix<double, 6, 5> l_6x5;
l_6x5 = L.block(0, 0, 6, 5);
Eigen::JacobiSVD<Mat> svdOfL5(l_6x5,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec5 b5 = svdOfL5.solve(rho);
if ((l_6x5 * b5).isApprox(rho, kSuccessThreshold)) {
if (b5(0) < 0) {
betas(0) = std::sqrt(-b5(0));
if (b5(2) < 0) {
betas(1) = std::sqrt(-b5(2));
} else {
b5(2) = 0;
}
} else {
betas(0) = std::sqrt(b5(0));
if (b5(2) > 0) {
betas(1) = std::sqrt(b5(2));
} else {
b5(2) = 0;
}
}
if (b5(1) < 0) {
betas(0) = -betas(0);
}
betas(2) = b5(3) / betas(0);
betas(3) = 0;
ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
AbsoluteOrientation(X_world, X_camera, &Rs[2], &ts[2]);
rmse(2) = RootMeanSquareError(x_camera, X_world, K, Rs[2], ts[2]);
} else {
LOG(ERROR) << "Third approximation of beta not good enough.";
ts[2].setZero();
rmse(2) = std::numeric_limits<double>::max();
}
// Finally, with all three solutions, select the (R, t) with the best RMSE.
VLOG(2) << "RMSE for solution 0: " << rmse(0);
VLOG(2) << "RMSE for solution 1: " << rmse(1);
VLOG(2) << "RMSE for solution 2: " << rmse(2);
size_t n = 0;
if (rmse(1) < rmse(0)) {
n = 1;
}
if (rmse(2) < rmse(n)) {
n = 2;
}
if (rmse(n) == std::numeric_limits<double>::max()) {
LOG(ERROR) << "All three possibilities failed. Reporting failure.";
return false;
}
VLOG(1) << "RMSE for best solution #" << n << ": " << rmse(n);
*R = Rs[n];
*t = ts[n];
// TODO(julien): Improve the solutions with non-linear refinement.
return true;
}
/*
Straight from the paper:
http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
function [R T] = ppnp(P,S,tol)
% input
% P : matrix (nx3) image coordinates in camera reference [u v 1]
% S : matrix (nx3) coordinates in world reference [X Y Z]
% tol: exit threshold
%
% output
% R : matrix (3x3) rotation (world-to-camera)
% T : vector (3x1) translation (world-to-camera)
%
n = size(P,1);
Z = zeros(n);
e = ones(n,1);
A = eye(n)-((e*e')./n);
II = e./n;
err = +Inf;
E_old = 1000*ones(n,3);
while err>tol
[U,~,V] = svd(P*Z*A*S);
VT = V;
R=U*[1 0 0; 0 1 0; 0 0 det(U*VT)]*VT;
PR = P*R;
c = (S-Z*PR)*II;
Y = S-e*c;
Zmindiag = diag(PR*Y)./(sum(P.*P,2));
Zmindiag(Zmindiag<0)=0;
Z = diag(Zmindiag);
E = Y-Z*PR;
err = norm(E-E_old,'fro');
E_old = E;
end
T = -R*c;
end
*/
// TODO(keir): Re-do all the variable names and add comments matching the paper.
// This implementation has too much of the terseness of the original. On the
// other hand, it did work on the first try.
bool EuclideanResectionPPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t) {
int n = x_camera.cols();
Mat Z = Mat::Zero(n, n);
Vec e = Vec::Ones(n);
Mat A = Mat::Identity(n, n) - (e * e.transpose() / n);
Vec II = e / n;
Mat P(n, 3);
P.col(0) = x_camera.row(0);
P.col(1) = x_camera.row(1);
P.col(2).setConstant(1.0);
Mat S = X_world.transpose();
double error = std::numeric_limits<double>::infinity();
Mat E_old = 1000 * Mat::Ones(n, 3);
Vec3 c;
Mat E(n, 3);
int iteration = 0;
double tolerance = 1e-5;
// TODO(keir): The limit of 100 can probably be reduced, but this will require
// some investigation.
while (error > tolerance && iteration < 100) {
Mat3 tmp = P.transpose() * Z * A * S;
Eigen::JacobiSVD<Mat3> svd(tmp, Eigen::ComputeFullU | Eigen::ComputeFullV);
Mat3 U = svd.matrixU();
Mat3 VT = svd.matrixV().transpose();
Vec3 s;
s << 1, 1, (U * VT).determinant();
*R = U * s.asDiagonal() * VT;
Mat PR = P * *R; // n x 3
c = (S - Z*PR).transpose() * II;
Mat Y = S - e*c.transpose(); // n x 3
Vec Zmindiag = (PR * Y.transpose()).diagonal()
.cwiseQuotient(P.rowwise().squaredNorm());
for (int i = 0; i < n; ++i) {
Zmindiag[i] = std::max(Zmindiag[i], 0.0);
}
Z = Zmindiag.asDiagonal();
E = Y - Z*PR;
error = (E - E_old).norm();
LOG(INFO) << "PPnP error(" << (iteration++) << "): " << error;
E_old = E;
}
*t = -*R*c;
// TODO(keir): Figure out what the failure cases are. Is it too many
// iterations? Spend some time going through the math figuring out if there
// is some way to detect that the algorithm is going crazy, and return false.
return true;
}
}  // namespace euclidean_resection
} // namespace libmv

View File

@@ -1,148 +0,0 @@
// Copyright (c) 2010 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
namespace libmv {
namespace euclidean_resection {
enum ResectionMethod {
RESECTION_ANSAR_DANIILIDIS,
// The "EPnP" algorithm by Lepetit et al.
// http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
RESECTION_EPNP,
// The Procrustes PNP algorithm ("PPnP")
// http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
RESECTION_PPNP
};
/**
* Computes the extrinsic parameters, R and t for a calibrated camera
* from 4 or more 3D points and their normalized images.
*
* \param x_camera Image points in normalized camera coordinates e.g. x_camera
* = inv(K) * x_image.
* \param X_world 3D points in the world coordinate system
* \param R Solution for the camera rotation matrix
* \param t Solution for the camera translation vector
* \param method The resection method to use.
*/
bool EuclideanResection(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t,
ResectionMethod method = RESECTION_EPNP);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera
* from 4 or more 3D points and their images.
*
* \param x_image Image points in non-normalized image coordinates. The
* coordinates are laid out one per column, so the matrix is 2xN
* or 3xN for Euclidean or homogeneous 2D coordinates.
* \param X_world 3D points in the world coordinate system
* \param K Intrinsic parameters camera matrix
* \param R Solution for the camera rotation matrix
* \param t Solution for the camera translation vector
* \param method Resection method
*/
bool EuclideanResection(const Mat &x_image,
const Mat3X &X_world,
const Mat3 &K,
Mat3 *R, Vec3 *t,
ResectionMethod method = RESECTION_EPNP);
/**
* The absolute orientation algorithm recovers the transformation between a set
* of 3D points, X and Xp such that:
*
* Xp = R*X + t
*
* The recovery of the absolute orientation is implemented after this article:
* Horn, Hilden, "Closed-form solution of absolute orientation using
* orthonormal matrices"
*/
void AbsoluteOrientation(const Mat3X &X,
const Mat3X &Xp,
Mat3 *R,
Vec3 *t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
*
* \param x_camera Image points in normalized camera coordinates, e.g.
* x_camera=inv(K)*x_image
* \param X_world 3D points in the world coordinate system
* \param R Solution for the camera rotation matrix
* \param t Solution for the camera translation vector
*
* This is the algorithm described in: "Linear Pose Estimation from Points or
* Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no. 5.
*/
void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
*
* \param x_camera Image points in normalized camera coordinates,
* e.g. x_camera = inv(K) * x_image
* \param X_world 3D points in the world coordinate system
* \param R Solution for the camera rotation matrix
* \param t Solution for the camera translation vector
*
* This is the algorithm described in:
* "{EP$n$P: An Accurate $O(n)$ Solution to the P$n$P Problem", by V. Lepetit
* and F. Moreno-Noguer and P. Fua, IJCV 2009. vol. 81, no. 2
* \note: the non-linear optimization is not implemented here.
*/
bool EuclideanResectionEPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
*
* \param x_camera Image points in normalized camera coordinates,
* e.g. x_camera = inv(K) * x_image
* \param X_world 3D points in the world coordinate system
* \param R Solution for the camera rotation matrix
* \param t Solution for the camera translation vector
*
* Straight from the paper:
* http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
*/
bool EuclideanResectionPPnP(const Mat2X &x_camera,
const Mat3X &X_world,
Mat3 *R, Vec3 *t);
} // namespace euclidean_resection
} // namespace libmv
#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
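A minimal sketch of calling the resection API above (not part of the diff); the inputs stand for hypothetical 2xN normalized image points and the corresponding 3xN world points:

#include "libmv/multiview/euclidean_resection.h"

bool ResectCamera(const libmv::Mat2X &x_camera,
                  const libmv::Mat3X &X_world,
                  libmv::Mat3 *R, libmv::Vec3 *t) {
  // At least four correspondences are required; EPnP is the default method.
  return libmv::euclidean_resection::EuclideanResection(
      x_camera, X_world, R, t,
      libmv::euclidean_resection::RESECTION_EPNP);
}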

View File

@@ -1,237 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/euclidean_resection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "testing/testing.h"
using namespace libmv::euclidean_resection;
using namespace libmv;
// Generates all necessary inputs and expected outputs for EuclideanResection.
void CreateCameraSystem(const Mat3& KK,
const Mat3X& x_image,
const Vec& X_distances,
const Mat3& R_input,
const Vec3& T_input,
Mat2X *x_camera,
Mat3X *X_world,
Mat3 *R_expected,
Vec3 *T_expected) {
int num_points = x_image.cols();
Mat3X x_unit_cam(3, num_points);
x_unit_cam = KK.inverse() * x_image;
// Create normalized camera coordinates to be used as an input to the PnP
// function, instead of using NormalizeColumnVectors(&x_unit_cam).
*x_camera = x_unit_cam.block(0, 0, 2, num_points);
for (int i = 0; i < num_points; ++i) {
x_unit_cam.col(i).normalize();
}
// Create the 3D points in the camera system.
Mat X_camera(3, num_points);
for (int i = 0; i < num_points; ++i) {
X_camera.col(i) = X_distances(i) * x_unit_cam.col(i);
}
// Apply the transformation to the camera 3D points
Mat translation_matrix(3, num_points);
translation_matrix.row(0).setConstant(T_input(0));
translation_matrix.row(1).setConstant(T_input(1));
translation_matrix.row(2).setConstant(T_input(2));
*X_world = R_input * X_camera + translation_matrix;
// Create the expected result for comparison.
*R_expected = R_input.transpose();
*T_expected = *R_expected * (-T_input);
};
TEST(AbsoluteOrientation, QuaternionSolution) {
int num_points = 4;
Mat X;
Mat Xp;
X = 100 * Mat::Random(3, num_points);
// Create a random translation and rotation.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 t_input;
t_input.setRandom();
t_input = 100 * t_input;
Mat translation_matrix(3, num_points);
translation_matrix.row(0).setConstant(t_input(0));
translation_matrix.row(1).setConstant(t_input(1));
translation_matrix.row(2).setConstant(t_input(2));
// Create the transformed 3D points Xp as Xp = R * X + t.
Xp = R_input * X + translation_matrix;
// Output variables.
Mat3 R;
Vec3 t;
AbsoluteOrientation(X, Xp, &R, &t);
EXPECT_MATRIX_NEAR(t, t_input, 1e-6);
EXPECT_MATRIX_NEAR(R, R_input, 1e-8);
}
TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
// In this test only the translation and rotation are random. The image
// points are selected from a real case and are well conditioned.
Vec2i image_dimensions;
image_dimensions << 1600, 1200;
Mat3 KK;
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// The real image points.
int num_points = 4;
Mat3X x_image(3, num_points);
x_image << 1164.06, 734.948, 749.599, 430.727,
681.386, 844.59, 496.315, 580.775,
1, 1, 1, 1;
// A vector of the 4 distances to the 3D points.
Vec X_distances = 100 * Vec::Random(num_points).array().abs();
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
T_input = 100 * T_input;
// Create the camera system, also getting the expected result of the
// transformation.
Mat3 R_expected;
Vec3 T_expected;
Mat3X X_world;
Mat2X x_camera;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
// Finally, run the code under test.
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
// For now, the EPnP doesn't have a non-linear optimization step and so is
// not precise enough with only 4 points.
//
// TODO(jmichot): Reenable this test when there is nonlinear refinement.
#if 0
R_output.setIdentity();
T_output.setZero();
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_EPNP);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
#endif
}
// TODO(jmichot): Reduce the code duplication here with the code above.
TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 KK;
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
// Create random image points for a 1600x1200 image.
int w = 1600;
int h = 1200;
int num_points = 6;
Mat3X x_image(3, num_points);
x_image.row(0) = w * Vec::Random(num_points).array().abs();
x_image.row(1) = h * Vec::Random(num_points).array().abs();
x_image.row(2).setOnes();
// Normalized camera coordinates to be used as an input to the PnP function.
Mat2X x_camera;
Vec X_distances = 100 * Vec::Random(num_points).array().abs();
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
T_input = 100 * T_input;
// Create the camera system.
Mat3 R_expected;
Vec3 T_expected;
Mat3X X_world;
CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
&x_camera, &X_world, &R_expected, &T_expected);
// Test each of the resection methods.
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_camera, X_world,
&R_output, &T_output,
RESECTION_EPNP);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
EuclideanResection(x_image, X_world, KK,
&R_output, &T_output);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
}

View File

@@ -1,544 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/fundamental.h"
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
namespace libmv {
static void EliminateRow(const Mat34 &P, int row, Mat *X) {
X->resize(2, 4);
int first_row = (row + 1) % 3;
int second_row = (row + 2) % 3;
for (int i = 0; i < 4; ++i) {
(*X)(0, i) = P(first_row, i);
(*X)(1, i) = P(second_row, i);
}
}
void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
*P1 << Mat3::Identity(), Vec3::Zero();
Vec3 e2;
Mat3 Ft = F.transpose();
Nullspace(&Ft, &e2);
*P2 << CrossProductMatrix(e2) * F, e2;
}
// Adapted from vgg_F_from_P.
void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
Mat X[3];
Mat Y[3];
Mat XY;
for (int i = 0; i < 3; ++i) {
EliminateRow(P1, i, X + i);
EliminateRow(P2, i, Y + i);
}
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
VerticalStack(X[j], Y[i], &XY);
(*F)(i, j) = XY.determinant();
}
}
}
// HZ 11.1 pag.279 (x1 = x, x2 = x')
// http://www.cs.unc.edu/~marc/tutorial/node54.html
static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
DCHECK_EQ(x1.cols(), x2.cols());
int n = x1.cols();
Mat A(n, 9);
for (int i = 0; i < n; ++i) {
A(i, 0) = x2(0, i) * x1(0, i);
A(i, 1) = x2(0, i) * x1(1, i);
A(i, 2) = x2(0, i);
A(i, 3) = x2(1, i) * x1(0, i);
A(i, 4) = x2(1, i) * x1(1, i);
A(i, 5) = x2(1, i);
A(i, 6) = x1(0, i);
A(i, 7) = x1(1, i);
A(i, 8) = 1;
}
Vec9 f;
double smaller_singular_value = Nullspace(&A, &f);
*F = Map<RMat3>(f.data());
return smaller_singular_value;
}
// HZ 11.1.1 pag.280
void EnforceFundamentalRank2Constraint(Mat3 *F) {
Eigen::JacobiSVD<Mat3> USV(*F, Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec3 d = USV.singularValues();
d(2) = 0.0;
*F = USV.matrixU() * d.asDiagonal() * USV.matrixV().transpose();
}
// HZ 11.2 pag.281 (x1 = x, x2 = x')
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
DCHECK_EQ(x1.cols(), x2.cols());
// Normalize the data.
Mat3 T1, T2;
PreconditionerFromPoints(x1, &T1);
PreconditionerFromPoints(x2, &T2);
Mat x1_normalized, x2_normalized;
ApplyTransformationToPoints(x1, T1, &x1_normalized);
ApplyTransformationToPoints(x2, T2, &x2_normalized);
// Estimate the fundamental matrix.
double smaller_singular_value =
EightPointSolver(x1_normalized, x2_normalized, F);
EnforceFundamentalRank2Constraint(F);
// Denormalize the fundamental matrix.
*F = T2.transpose() * (*F) * T1;
return smaller_singular_value;
}
// Seven-point algorithm.
// http://www.cs.unc.edu/~marc/tutorial/node55.html
double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_EQ(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
DCHECK_EQ(x1.cols(), x2.cols());
// Build a 9 x n matrix from point matches, where each row is equivalent to
// the equation x'T*F*x = 0 for a single correspondence pair (x', x). The
// domain of the matrix is a 9 element vector corresponding to F. The
// nullspace should be rank two; the two dimensions correspond to the set of
// F matrices satisfying the epipolar geometry.
Matrix<double, 7, 9> A;
for (int ii = 0; ii < 7; ++ii) {
A(ii, 0) = x1(0, ii) * x2(0, ii); // 0 represents x coords,
A(ii, 1) = x1(1, ii) * x2(0, ii); // 1 represents y coords.
A(ii, 2) = x2(0, ii);
A(ii, 3) = x1(0, ii) * x2(1, ii);
A(ii, 4) = x1(1, ii) * x2(1, ii);
A(ii, 5) = x2(1, ii);
A(ii, 6) = x1(0, ii);
A(ii, 7) = x1(1, ii);
A(ii, 8) = 1.0;
}
// Find the two F matrices in the nullspace of A.
Vec9 f1, f2;
double s = Nullspace2(&A, &f1, &f2);
Mat3 F1 = Map<RMat3>(f1.data());
Mat3 F2 = Map<RMat3>(f2.data());
// Then, use the condition det(F) = 0 to determine F. In other words, solve
// det(F1 + a*F2) = 0 for a.
double a = F1(0, 0), j = F2(0, 0),
b = F1(0, 1), k = F2(0, 1),
c = F1(0, 2), l = F2(0, 2),
d = F1(1, 0), m = F2(1, 0),
e = F1(1, 1), n = F2(1, 1),
f = F1(1, 2), o = F2(1, 2),
g = F1(2, 0), p = F2(2, 0),
h = F1(2, 1), q = F2(2, 1),
i = F1(2, 2), r = F2(2, 2);
// Run fundamental_7point_coeffs.py to get the below coefficients.
// The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
double P[4] = {
a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g,
a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
j*n*r + k*o*p + l*m*q - j*o*q - k*m*r - l*n*p,
};
// Solve for the roots of P[3]*x^3 + P[2]*x^2 + P[1]*x + P[0] = 0.
double roots[3];
int num_roots = SolveCubicPolynomial(P, roots);
// Build the fundamental matrix for each solution.
for (int kk = 0; kk < num_roots; ++kk) {
F->push_back(F1 + roots[kk] * F2);
}
return s;
}
double FundamentalFromCorrespondences7Point(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
DCHECK_EQ(x1.cols(), x2.cols());
// Normalize the data.
Mat3 T1, T2;
PreconditionerFromPoints(x1, &T1);
PreconditionerFromPoints(x2, &T2);
Mat x1_normalized, x2_normalized;
ApplyTransformationToPoints(x1, T1, &x1_normalized);
ApplyTransformationToPoints(x2, T2, &x2_normalized);
// Estimate the fundamental matrix.
double smaller_singular_value =
FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, &(*F));
for (int k = 0; k < F->size(); ++k) {
Mat3 & Fmat = (*F)[k];
// Denormalize the fundamental matrix.
Fmat = T2.transpose() * Fmat * T1;
}
return smaller_singular_value;
}
void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized) {
*F_normalized = F / FrobeniusNorm(F);
if ((*F_normalized)(2, 2) < 0) {
*F_normalized *= -1;
}
}
double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
Vec3 F_x = F * x;
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) / ( F_x.head<2>().squaredNorm()
+ Ft_y.head<2>().squaredNorm());
}
double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
Vec3 F_x = F * x;
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
+ 1 / Ft_y.head<2>().squaredNorm());
}
// HZ 9.6 pag 257 (formula 9.12)
void EssentialFromFundamental(const Mat3 &F,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *E) {
*E = K2.transpose() * F * K1;
}
// HZ 9.6 pag 257 (formula 9.12)
// Or http://ai.stanford.edu/~birch/projective/node20.html
void FundamentalFromEssential(const Mat3 &E,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *F) {
*F = K2.inverse().transpose() * E * K1.inverse();
}
void RelativeCameraMotion(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *R,
Vec3 *t) {
*R = R2 * R1.transpose();
*t = t2 - (*R) * t1;
}
// HZ 9.6 pag 257
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E) {
Mat3 R;
Vec3 t;
RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
Mat3 Tx = CrossProductMatrix(t);
*E = Tx * R;
}
// HZ 9.6 pag 259 (Result 9.19)
void MotionFromEssential(const Mat3 &E,
std::vector<Mat3> *Rs,
std::vector<Vec3> *ts) {
Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
Mat3 U = USV.matrixU();
Mat3 Vt = USV.matrixV().transpose();
// Last column of U is undetermined since d = (a a 0).
if (U.determinant() < 0) {
U.col(2) *= -1;
}
// Last row of Vt is undetermined since d = (a a 0).
if (Vt.determinant() < 0) {
Vt.row(2) *= -1;
}
Mat3 W;
W << 0, -1, 0,
1, 0, 0,
0, 0, 1;
Mat3 U_W_Vt = U * W * Vt;
Mat3 U_Wt_Vt = U * W.transpose() * Vt;
Rs->resize(4);
(*Rs)[0] = U_W_Vt;
(*Rs)[1] = U_W_Vt;
(*Rs)[2] = U_Wt_Vt;
(*Rs)[3] = U_Wt_Vt;
ts->resize(4);
(*ts)[0] = U.col(2);
(*ts)[1] = -U.col(2);
(*ts)[2] = U.col(2);
(*ts)[3] = -U.col(2);
}
int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
const std::vector<Vec3> &ts,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2) {
DCHECK_EQ(4, Rs.size());
DCHECK_EQ(4, ts.size());
Mat34 P1, P2;
Mat3 R1;
Vec3 t1;
R1.setIdentity();
t1.setZero();
P_From_KRt(K1, R1, t1, &P1);
for (int i = 0; i < 4; ++i) {
const Mat3 &R2 = Rs[i];
const Vec3 &t2 = ts[i];
P_From_KRt(K2, R2, t2, &P2);
Vec3 X;
TriangulateDLT(P1, x1, P2, x2, &X);
double d1 = Depth(R1, t1, X);
double d2 = Depth(R2, t2, X);
// Test if the point is in front of both cameras.
if (d1 > 0 && d2 > 0) {
return i;
}
}
return -1;
}
bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2,
Mat3 *R,
Vec3 *t) {
std::vector<Mat3> Rs;
std::vector<Vec3> ts;
MotionFromEssential(E, &Rs, &ts);
int solution = MotionFromEssentialChooseSolution(Rs, ts, K1, x1, K2, x2);
if (solution >= 0) {
*R = Rs[solution];
*t = ts[solution];
return true;
} else {
return false;
}
}
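// Illustrative sketch, not part of the original file: recovering a relative
// pose from a fundamental matrix estimated in image coordinates, given both
// calibration matrices and one matching point. The helper name is an
// assumption.
static bool PoseFromFundamentalSketch(const Mat3 &F,
                                      const Mat3 &K1, const Vec2 &x1,
                                      const Mat3 &K2, const Vec2 &x2,
                                      Mat3 *R, Vec3 *t) {
  // E = K2^T * F * K1 (HZ formula 9.12).
  Mat3 E;
  EssentialFromFundamental(F, K1, K2, &E);
  // Keep the one of the four decompositions that puts the triangulated
  // correspondence in front of both cameras.
  return MotionFromEssentialAndCorrespondence(E, K1, x1, K2, x2, R, t);
}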
void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
Eigen::JacobiSVD<Mat3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
// See Hartley & Zisserman page 294, result 11.1, which shows how to get the
// closest essential matrix to a matrix that is "almost" an essential matrix.
double a = svd.singularValues()(0);
double b = svd.singularValues()(1);
double s = (a + b) / 2.0;
LG << "Initial reconstruction's rotation is non-euclidean by "
<< (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
<< svd.singularValues().transpose();
Vec3 diag;
diag << s, s, 0;
*E = svd.matrixU() * diag.asDiagonal() * svd.matrixV().transpose();
}
// Default settings for fundamental estimation which should be suitable
// for a wide range of use cases.
EstimateFundamentalOptions::EstimateFundamentalOptions(void) :
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
}
namespace {
// Cost functor which computes symmetric epipolar distance
// used for fundamental matrix refinement.
class FundamentalSymmetricEpipolarCostFunctor {
public:
FundamentalSymmetricEpipolarCostFunctor(const Vec2 &x,
const Vec2 &y)
: x_(x), y_(y) {}
template<typename T>
bool operator()(const T *fundamental_parameters, T *residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
Mat3 F(fundamental_parameters);
Vec3 x(T(x_(0)), T(x_(1)), T(1.0));
Vec3 y(T(y_(0)), T(y_(1)), T(1.0));
Vec3 F_x = F * x;
Vec3 Ft_y = F.transpose() * y;
T y_F_x = y.dot(F_x);
residuals[0] = y_F_x * T(1) / F_x.head(2).norm();
residuals[1] = y_F_x * T(1) / Ft_y.head(2).norm();
return true;
}
const Mat x_;
const Mat y_;
};
// Termination checking callback used for fundamental estimation.
// It finishes the minimization as soon as the actual average
// symmetric epipolar distance is less than or equal to the expected
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F)
: options_(options), x1_(x1), x2_(x2), F_(F) {}
virtual ceres::CallbackReturnType operator()(
const ceres::IterationSummary& summary) {
// If the step wasn't successful, there's nothing to do.
if (!summary.step_is_successful) {
return ceres::SOLVER_CONTINUE;
}
// Calculate average of symmetric epipolar distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance += SymmetricEpipolarDistance(*F_,
x1_.col(i),
x2_.col(i));
}
average_distance /= x1_.cols();
if (average_distance <= options_.expected_average_symmetric_distance) {
return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
}
return ceres::SOLVER_CONTINUE;
}
private:
const EstimateFundamentalOptions &options_;
const Mat &x1_;
const Mat &x2_;
Mat3 *F_;
};
} // namespace
/* Fundamental transformation estimation. */
bool EstimateFundamentalFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F) {
// Step 1: Algebraic fundamental estimation.
// Assume algebraic estimation always succeeds.
NormalizedEightPointSolver(x1, x2, F);
LG << "Estimated matrix after algebraic estimation:\n" << *F;
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
FundamentalSymmetricEpipolarCostFunctor
*fundamental_symmetric_epipolar_cost_function =
new FundamentalSymmetricEpipolarCostFunctor(x1.col(i),
x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
FundamentalSymmetricEpipolarCostFunctor,
2, // num_residuals
9>(fundamental_symmetric_epipolar_cost_function),
NULL,
F->data());
}
// Configure the solve.
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::DENSE_QR;
solver_options.max_num_iterations = options.max_num_iterations;
solver_options.update_state_every_iteration = true;
// Terminate if the average symmetric distance is good enough.
TerminationCheckingCallback callback(x1, x2, options, F);
solver_options.callbacks.push_back(&callback);
// Run the solve.
ceres::Solver::Summary summary;
ceres::Solve(solver_options, &problem, &summary);
VLOG(1) << "Summary:\n" << summary.FullReport();
LG << "Final refined matrix:\n" << *F;
return summary.IsSolutionUsable();
}
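// Illustrative usage sketch, not part of the original file: estimate F from
// 2xN matrices of matching image points and report the refined fit. The
// variable names and the tweaked option values are assumptions.
static bool EstimateFundamentalSketch(const Mat &points_1,
                                      const Mat &points_2,
                                      Mat3 *F) {
  EstimateFundamentalOptions options;
  options.max_num_iterations = 100;  // allow a longer refinement than default
  if (!EstimateFundamentalFromCorrespondences(points_1, points_2,
                                              options, F)) {
    return false;
  }
  // Report the average symmetric epipolar distance of the refined matrix,
  // measured in the same units as the input points.
  double average_distance = 0.0;
  for (int i = 0; i < points_1.cols(); i++) {
    average_distance += SymmetricEpipolarDistance(*F,
                                                  points_1.col(i),
                                                  points_2.col(i));
  }
  LG << "Average distance: " << average_distance / points_1.cols();
  return true;
}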
} // namespace libmv

View File

@@ -1,187 +0,0 @@
// Copyright (c) 2007, 2008, 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_FUNDAMENTAL_H_
#define LIBMV_MULTIVIEW_FUNDAMENTAL_H_
#include <vector>
#include "libmv/numeric/numeric.h"
namespace libmv {
void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2);
void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F);
/**
* The normalized 8-point fundamental matrix solver.
*/
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F);
/**
* 7 points (minimal case, point coordinates must be normalized beforehand):
*/
double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F);
/**
* 7 points (point coordinates must be in image space):
*/
double FundamentalFromCorrespondences7Point(const Mat &x1,
const Mat &x2,
std::vector<Mat3> *F);
/**
* 8 points (point coordinates must be in image space):
*/
double NormalizedEightPointSolver(const Mat &x1,
const Mat &x2,
Mat3 *F);
/**
* Fundamental matrix utility functions:
*/
void EnforceFundamentalRank2Constraint(Mat3 *F);
void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
/**
* Approximate squared reprojection error.
*
* See page 287 of HZ equation 11.9. This avoids triangulating the point,
* relying only on the entries in F.
*/
double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
/**
* Calculates the sum of the distances from the points to the epipolar lines.
*
* See page 288 of HZ equation 11.10.
*/
double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
/**
* Compute the relative camera motion between two cameras.
*
* Given the motion parameters of two cameras, computes the motion parameters
* of the second one assuming the first one to be at the origin.
* If T1 and T2 are the camera motions, the computed relative motion is
* T = T2 T1^{-1}
*/
void RelativeCameraMotion(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *R,
Vec3 *t);
void EssentialFromFundamental(const Mat3 &F,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *E);
void FundamentalFromEssential(const Mat3 &E,
const Mat3 &K1,
const Mat3 &K2,
Mat3 *F);
void EssentialFromRt(const Mat3 &R1,
const Vec3 &t1,
const Mat3 &R2,
const Vec3 &t2,
Mat3 *E);
void MotionFromEssential(const Mat3 &E,
std::vector<Mat3> *Rs,
std::vector<Vec3> *ts);
/**
* Choose one of the four possible motion solutions from an essential matrix.
*
* Decides the right solution by checking that the triangulation of a match
* x1--x2 lies in front of the cameras. See HZ 9.6 pag 259 (9.6.3 Geometrical
* interpretation of the 4 solutions)
*
* \return index of the right solution or -1 if no solution.
*/
int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
const std::vector<Vec3> &ts,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2);
bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
const Mat3 &K1,
const Vec2 &x1,
const Mat3 &K2,
const Vec2 &x2,
Mat3 *R,
Vec3 *t);
/**
* Find closest essential matrix E to fundamental F
*/
void FundamentalToEssential(const Mat3 &F, Mat3 *E);
/**
* This structure contains options that control how the fundamental
* estimation operates.
*
* Defaults should be suitable for a wide range of use cases, but
* better performance and accuracy might require tweaking.
*/
struct EstimateFundamentalOptions {
// Default constructor which sets up options for generic usage.
EstimateFundamentalOptions(void);
// Maximal number of iterations for the refinement step.
int max_num_iterations;
// Expected average of symmetric epipolar distance between
// actual destination points and original ones transformed by
// estimated fundamental matrix.
//
// Refinement will finish as soon as the average symmetric
// epipolar distance is less than or equal to this value.
//
// This distance is measured in the same units as input points are.
double expected_average_symmetric_distance;
};
/**
* Fundamental transformation estimation.
*
* This function estimates the fundamental transformation from a list of 2D
* correspondences by doing algebraic estimation first, followed by result
* refinement.
*/
bool EstimateFundamentalFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateFundamentalOptions &options,
Mat3 *F);
} // namespace libmv
#endif // LIBMV_MULTIVIEW_FUNDAMENTAL_H_

View File

@@ -1,162 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/logging/logging.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
TEST(Fundamental, FundamentalFromProjections) {
Mat34 P1_gt, P2_gt;
P1_gt << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2_gt << 1, 1, 1, 3,
0, 2, 0, 3,
0, 1, 1, 0;
Mat3 F_gt;
FundamentalFromProjections(P1_gt, P2_gt, &F_gt);
Mat34 P1, P2;
ProjectionsFromFundamental(F_gt, &P1, &P2);
Mat3 F;
FundamentalFromProjections(P1, P2, &F);
EXPECT_MATRIX_PROP(F_gt, F, 1e-6);
}
TEST(Fundamental, PreconditionerFromPoints) {
int n = 4;
Mat points(2, n);
points << 0, 0, 1, 1,
0, 2, 1, 3;
Mat3 T;
PreconditionerFromPoints(points, &T);
Mat normalized_points;
ApplyTransformationToPoints(points, T, &normalized_points);
Vec mean, variance;
MeanAndVarianceAlongRows(normalized_points, &mean, &variance);
EXPECT_NEAR(0, mean(0), 1e-8);
EXPECT_NEAR(0, mean(1), 1e-8);
EXPECT_NEAR(2, variance(0), 1e-8);
EXPECT_NEAR(2, variance(1), 1e-8);
}
TEST(Fundamental, EssentialFromFundamental) {
TwoViewDataSet d = TwoRealisticCameras();
Mat3 E_from_Rt;
EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E_from_Rt);
Mat3 E_from_F;
EssentialFromFundamental(d.F, d.K1, d.K2, &E_from_F);
EXPECT_MATRIX_PROP(E_from_Rt, E_from_F, 1e-6);
}
TEST(Fundamental, MotionFromEssential) {
TwoViewDataSet d = TwoRealisticCameras();
Mat3 E;
EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
Mat3 R;
Vec3 t;
RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
NormalizeL2(&t);
std::vector<Mat3> Rs;
std::vector<Vec3> ts;
MotionFromEssential(E, &Rs, &ts);
bool one_solution_is_correct = false;
for (size_t i = 0; i < Rs.size(); ++i) {
if (FrobeniusDistance(Rs[i], R) < 1e-8 && DistanceL2(ts[i], t) < 1e-8) {
one_solution_is_correct = true;
break;
}
}
EXPECT_TRUE(one_solution_is_correct);
}
TEST(Fundamental, MotionFromEssentialChooseSolution) {
TwoViewDataSet d = TwoRealisticCameras();
Mat3 E;
EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
Mat3 R;
Vec3 t;
RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
NormalizeL2(&t);
std::vector<Mat3> Rs;
std::vector<Vec3> ts;
MotionFromEssential(E, &Rs, &ts);
Vec2 x1, x2;
MatrixColumn(d.x1, 0, &x1);
MatrixColumn(d.x2, 0, &x2);
int solution = MotionFromEssentialChooseSolution(Rs, ts, d.K1, x1, d.K2, x2);
EXPECT_LE(0, solution);
EXPECT_LE(solution, 3);
EXPECT_LE(FrobeniusDistance(Rs[solution], R), 1e-8);
EXPECT_LE(DistanceL2(ts[solution], t), 1e-8);
}
TEST(Fundamental, MotionFromEssentialAndCorrespondence) {
TwoViewDataSet d = TwoRealisticCameras();
Mat3 E;
EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
Mat3 R;
Vec3 t;
RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
NormalizeL2(&t);
Vec2 x1, x2;
MatrixColumn(d.x1, 0, &x1);
MatrixColumn(d.x2, 0, &x2);
Mat3 R_estimated;
Vec3 t_estimated;
MotionFromEssentialAndCorrespondence(E, d.K1, x1, d.K2, x2,
&R_estimated, &t_estimated);
EXPECT_LE(FrobeniusDistance(R_estimated, R), 1e-8);
EXPECT_LE(DistanceL2(t_estimated, t), 1e-8);
}
} // namespace

View File

@@ -1,465 +0,0 @@
// Copyright (c) 2008, 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/homography.h"
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/homography_parameterization.h"
namespace libmv {
/** 2D Homography transformation estimation in the case that points are in
* euclidean coordinates.
*
* x = H y
* x and y vectors must have the same direction, so we can write
* crossproduct(|x|, H * |y|) = |0|
*
* | 0 -1 x2| |a b c| |y1| |0|
* | 1 0 -x1| * |d e f| * |y2| = |0|
* |-x2 x1 0| |g h 1| |1 | |0|
*
* That gives :
*
* (-d+x2*g)*y1 + (-e+x2*h)*y2 + -f+x2 |0|
* (a-x1*g)*y1 + (b-x1*h)*y2 + c-x1 = |0|
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
static bool Homography2DFromCorrespondencesLinearEuc(
const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision) {
assert(2 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
int n = x1.cols();
MatX8 L = Mat::Zero(n * 3, 8);
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
L(j, 0) = x1(0, i); // a
L(j, 1) = x1(1, i); // b
L(j, 2) = 1.0; // c
L(j, 6) = -x2(0, i) * x1(0, i); // g
L(j, 7) = -x2(0, i) * x1(1, i); // h
b(j, 0) = x2(0, i); // i
++j;
L(j, 3) = x1(0, i); // d
L(j, 4) = x1(1, i); // e
L(j, 5) = 1.0; // f
L(j, 6) = -x2(1, i) * x1(0, i); // g
L(j, 7) = -x2(1, i) * x1(1, i); // h
b(j, 0) = x2(1, i); // i
// This ensures better stability
// TODO(julien) make a lite version without this 3rd set
++j;
L(j, 0) = x2(1, i) * x1(0, i); // a
L(j, 1) = x2(1, i) * x1(1, i); // b
L(j, 2) = x2(1, i); // c
L(j, 3) = -x2(0, i) * x1(0, i); // d
L(j, 4) = -x2(0, i) * x1(1, i); // e
L(j, 5) = -x2(0, i); // f
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
Homography2DNormalizedParameterization<double>::To(h, H);
if ((L * h).isApprox(b, expected_precision)) {
return true;
} else {
return false;
}
}
/** 2D Homography transformation estimation in the case that points are in
* homogeneous coordinates.
*
* | 0 -x3 x2| |a b c| |y1| -x3*d+x2*g -x3*e+x2*h -x3*f+x2*1 |y1| (-x3*d+x2*g)*y1 (-x3*e+x2*h)*y2 (-x3*f+x2*1)*y3 |0|
* | x3 0 -x1| * |d e f| * |y2| = x3*a-x1*g x3*b-x1*h x3*c-x1*1 * |y2| = (x3*a-x1*g)*y1 (x3*b-x1*h)*y2 (x3*c-x1*1)*y3 = |0|
* |-x2 x1 0| |g h 1| |y3| -x2*a+x1*d -x2*b+x1*e -x2*c+x1*f |y3| (-x2*a+x1*d)*y1 (-x2*b+x1*e)*y2 (-x2*c+x1*f)*y3 |0|
* X = |a b c d e f g h|^t
*/
bool Homography2DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision) {
if (x1.rows() == 2) {
return Homography2DFromCorrespondencesLinearEuc(x1, x2, H,
expected_precision);
}
assert(3 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
const int x = 0;
const int y = 1;
const int w = 2;
int n = x1.cols();
MatX8 L = Mat::Zero(n * 3, 8);
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
L(j, 0) = x2(w, i) * x1(x, i); // a
L(j, 1) = x2(w, i) * x1(y, i); // b
L(j, 2) = x2(w, i) * x1(w, i); // c
L(j, 6) = -x2(x, i) * x1(x, i); // g
L(j, 7) = -x2(x, i) * x1(y, i); // h
b(j, 0) = x2(x, i) * x1(w, i);
++j;
L(j, 3) = x2(w, i) * x1(x, i); // d
L(j, 4) = x2(w, i) * x1(y, i); // e
L(j, 5) = x2(w, i) * x1(w, i); // f
L(j, 6) = -x2(y, i) * x1(x, i); // g
L(j, 7) = -x2(y, i) * x1(y, i); // h
b(j, 0) = x2(y, i) * x1(w, i);
// This ensures better stability
++j;
L(j, 0) = x2(y, i) * x1(x, i); // a
L(j, 1) = x2(y, i) * x1(y, i); // b
L(j, 2) = x2(y, i) * x1(w, i); // c
L(j, 3) = -x2(x, i) * x1(x, i); // d
L(j, 4) = -x2(x, i) * x1(y, i); // e
L(j, 5) = -x2(x, i) * x1(w, i); // f
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
if ((L * h).isApprox(b, expected_precision)) {
Homography2DNormalizedParameterization<double>::To(h, H);
return true;
} else {
return false;
}
}
// Default settings for homography estimation which should be suitable
// for a wide range of use cases.
EstimateHomographyOptions::EstimateHomographyOptions(void) :
use_normalization(true),
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {
}
namespace {
void GetNormalizedPoints(const Mat &original_points,
Mat *normalized_points,
Mat3 *normalization_matrix) {
IsotropicPreconditionerFromPoints(original_points, normalization_matrix);
ApplyTransformationToPoints(original_points,
*normalization_matrix,
normalized_points);
}
// Cost functor which computes symmetric geometric distance
// used for homography matrix refinement.
class HomographySymmetricGeometricCostFunctor {
public:
HomographySymmetricGeometricCostFunctor(const Vec2 &x,
const Vec2 &y)
: x_(x), y_(y) { }
template<typename T>
bool operator()(const T *homography_parameters, T *residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
Mat3 H(homography_parameters);
Vec3 x(T(x_(0)), T(x_(1)), T(1.0));
Vec3 y(T(y_(0)), T(y_(1)), T(1.0));
Vec3 H_x = H * x;
Vec3 Hinv_y = H.inverse() * y;
H_x /= H_x(2);
Hinv_y /= Hinv_y(2);
// This is a forward error.
residuals[0] = H_x(0) - T(y_(0));
residuals[1] = H_x(1) - T(y_(1));
// This is a backward error.
residuals[2] = Hinv_y(0) - T(x_(0));
residuals[3] = Hinv_y(1) - T(x_(1));
return true;
}
const Vec2 x_;
const Vec2 y_;
};
// Termination checking callback used for homography estimation.
// It finishes the minimization as soon as the actual average
// symmetric geometric distance is less than or equal to the expected
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
TerminationCheckingCallback(const Mat &x1, const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H)
: options_(options), x1_(x1), x2_(x2), H_(H) {}
virtual ceres::CallbackReturnType operator()(
const ceres::IterationSummary& summary) {
// If the step wasn't successful, there's nothing to do.
if (!summary.step_is_successful) {
return ceres::SOLVER_CONTINUE;
}
// Calculate average of symmetric geometric distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
average_distance += SymmetricGeometricDistance(*H_,
x1_.col(i),
x2_.col(i));
}
average_distance /= x1_.cols();
if (average_distance <= options_.expected_average_symmetric_distance) {
return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
}
return ceres::SOLVER_CONTINUE;
}
private:
const EstimateHomographyOptions &options_;
const Mat &x1_;
const Mat &x2_;
Mat3 *H_;
};
} // namespace
/** 2D Homography transformation estimation in the case that points are in
* euclidean coordinates.
*/
bool EstimateHomography2DFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H) {
// TODO(sergey): Support homogeneous coordinates, not just euclidean.
assert(2 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
Mat3 T1 = Mat3::Identity(),
T2 = Mat3::Identity();
// Step 1: Algebraic homography estimation.
Mat x1_normalized, x2_normalized;
if (options.use_normalization) {
LG << "Estimating homography using normalization.";
GetNormalizedPoints(x1, &x1_normalized, &T1);
GetNormalizedPoints(x2, &x2_normalized, &T2);
} else {
x1_normalized = x1;
x2_normalized = x2;
}
// Assume algebraic estimation always succeeds.
Homography2DFromCorrespondencesLinear(x1_normalized, x2_normalized, H);
// Denormalize the homography matrix.
if (options.use_normalization) {
*H = T2.inverse() * (*H) * T1;
}
LG << "Estimated matrix after algebraic estimation:\n" << *H;
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
HomographySymmetricGeometricCostFunctor
*homography_symmetric_geometric_cost_function =
new HomographySymmetricGeometricCostFunctor(x1.col(i),
x2.col(i));
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
HomographySymmetricGeometricCostFunctor,
4, // num_residuals
9>(homography_symmetric_geometric_cost_function),
NULL,
H->data());
}
// Configure the solve.
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::DENSE_QR;
solver_options.max_num_iterations = options.max_num_iterations;
solver_options.update_state_every_iteration = true;
// Terminate if the average symmetric distance is good enough.
TerminationCheckingCallback callback(x1, x2, options, H);
solver_options.callbacks.push_back(&callback);
// Run the solve.
ceres::Solver::Summary summary;
ceres::Solve(solver_options, &problem, &summary);
VLOG(1) << "Summary:\n" << summary.FullReport();
LG << "Final refined matrix:\n" << *H;
return summary.IsSolutionUsable();
}
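// Illustrative usage sketch, not part of the original file: estimate a 2D
// homography from 2xN euclidean correspondences and inspect the per-match
// symmetric geometric distance. Names are hypothetical.
static bool EstimateHomographySketch(const Mat &points_1,
                                     const Mat &points_2,
                                     Mat3 *H) {
  EstimateHomographyOptions options;
  options.use_normalization = true;  // keep the conditioning step enabled
  options.max_num_iterations = 100;
  if (!EstimateHomography2DFromCorrespondences(points_1, points_2,
                                               options, H)) {
    return false;
  }
  for (int i = 0; i < points_1.cols(); i++) {
    LG << "Match " << i << " symmetric geometric distance: "
       << SymmetricGeometricDistance(*H, points_1.col(i), points_2.col(i));
  }
  return true;
}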
/**
* x2 ~ A * x1
* x2^t * Hi * A *x1 = 0
* H1 = H2 = H3 =
* | 0 0 0 1| |-x2w| |0 0 0 0| | 0 | | 0 0 1 0| |-x2z|
* | 0 0 0 0| -> | 0 | |0 0 1 0| -> |-x2z| | 0 0 0 0| -> | 0 |
* | 0 0 0 0| | 0 | |0-1 0 0| | x2y| |-1 0 0 0| | x2x|
* |-1 0 0 0| | x2x| |0 0 0 0| | 0 | | 0 0 0 0| | 0 |
* H4 = H5 = H6 =
* |0 0 0 0| | 0 | | 0 1 0 0| |-x2y| |0 0 0 0| | 0 |
* |0 0 0 1| -> |-x2w| |-1 0 0 0| -> | x2x| |0 0 0 0| -> | 0 |
* |0 0 0 0| | 0 | | 0 0 0 0| | 0 | |0 0 0 1| |-x2w|
* |0-1 0 0| | x2y| | 0 0 0 0| | 0 | |0 0-1 0| | x2z|
* |a b c d|
* A = |e f g h|
* |i j k l|
* |m n o 1|
*
* x2^t * H1 * A *x1 = (-x2w*a +x2x*m )*x1x + (-x2w*b +x2x*n )*x1y + (-x2w*c +x2x*o )*x1z + (-x2w*d +x2x*1 )*x1w = 0
* x2^t * H2 * A *x1 = (-x2z*e +x2y*i )*x1x + (-x2z*f +x2y*j )*x1y + (-x2z*g +x2y*k )*x1z + (-x2z*h +x2y*l )*x1w = 0
* x2^t * H3 * A *x1 = (-x2z*a +x2x*i )*x1x + (-x2z*b +x2x*j )*x1y + (-x2z*c +x2x*k )*x1z + (-x2z*d +x2x*l )*x1w = 0
* x2^t * H4 * A *x1 = (-x2w*e +x2y*m )*x1x + (-x2w*f +x2y*n )*x1y + (-x2w*g +x2y*o )*x1z + (-x2w*h +x2y*1 )*x1w = 0
* x2^t * H5 * A *x1 = (-x2y*a +x2x*e )*x1x + (-x2y*b +x2x*f )*x1y + (-x2y*c +x2x*g )*x1z + (-x2y*d +x2x*h )*x1w = 0
* x2^t * H6 * A *x1 = (-x2w*i +x2z*m )*x1x + (-x2w*j +x2z*n )*x1y + (-x2w*k +x2z*o )*x1z + (-x2w*l +x2z*1 )*x1w = 0
*
* X = |a b c d e f g h i j k l m n o|^t
*/
bool Homography3DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat4 *H,
double expected_precision) {
assert(4 == x1.rows());
assert(5 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
const int x = 0;
const int y = 1;
const int z = 2;
const int w = 3;
int n = x1.cols();
MatX15 L = Mat::Zero(n * 6, 15);
Mat b = Mat::Zero(n * 6, 1);
for (int i = 0; i < n; ++i) {
int j = 6 * i;
L(j, 0) = -x2(w, i) * x1(x, i); // a
L(j, 1) = -x2(w, i) * x1(y, i); // b
L(j, 2) = -x2(w, i) * x1(z, i); // c
L(j, 3) = -x2(w, i) * x1(w, i); // d
L(j, 12) = x2(x, i) * x1(x, i); // m
L(j, 13) = x2(x, i) * x1(y, i); // n
L(j, 14) = x2(x, i) * x1(z, i); // o
b(j, 0) = -x2(x, i) * x1(w, i);
++j;
L(j, 4) = -x2(z, i) * x1(x, i); // e
L(j, 5) = -x2(z, i) * x1(y, i); // f
L(j, 6) = -x2(z, i) * x1(z, i); // g
L(j, 7) = -x2(z, i) * x1(w, i); // h
L(j, 8) = x2(y, i) * x1(x, i); // i
L(j, 9) = x2(y, i) * x1(y, i); // j
L(j, 10) = x2(y, i) * x1(z, i); // k
L(j, 11) = x2(y, i) * x1(w, i); // l
++j;
L(j, 0) = -x2(z, i) * x1(x, i); // a
L(j, 1) = -x2(z, i) * x1(y, i); // b
L(j, 2) = -x2(z, i) * x1(z, i); // c
L(j, 3) = -x2(z, i) * x1(w, i); // d
L(j, 8) = x2(x, i) * x1(x, i); // i
L(j, 9) = x2(x, i) * x1(y, i); // j
L(j, 10) = x2(x, i) * x1(z, i); // k
L(j, 11) = x2(x, i) * x1(w, i); // l
++j;
L(j, 4) = -x2(w, i) * x1(x, i); // e
L(j, 5) = -x2(w, i) * x1(y, i); // f
L(j, 6) = -x2(w, i) * x1(z, i); // g
L(j, 7) = -x2(w, i) * x1(w, i); // h
L(j, 12) = x2(y, i) * x1(x, i); // m
L(j, 13) = x2(y, i) * x1(y, i); // n
L(j, 14) = x2(y, i) * x1(z, i); // o
b(j, 0) = -x2(y, i) * x1(w, i);
++j;
L(j, 0) = -x2(y, i) * x1(x, i); // a
L(j, 1) = -x2(y, i) * x1(y, i); // b
L(j, 2) = -x2(y, i) * x1(z, i); // c
L(j, 3) = -x2(y, i) * x1(w, i); // d
L(j, 4) = x2(x, i) * x1(x, i); // e
L(j, 5) = x2(x, i) * x1(y, i); // f
L(j, 6) = x2(x, i) * x1(z, i); // g
L(j, 7) = x2(x, i) * x1(w, i); // h
++j;
L(j, 8) = -x2(w, i) * x1(x, i); // i
L(j, 9) = -x2(w, i) * x1(y, i); // j
L(j, 10) = -x2(w, i) * x1(z, i); // k
L(j, 11) = -x2(w, i) * x1(w, i); // l
L(j, 12) = x2(z, i) * x1(x, i); // m
L(j, 13) = x2(z, i) * x1(y, i); // n
L(j, 14) = x2(z, i) * x1(z, i); // o
b(j, 0) = -x2(z, i) * x1(w, i);
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
if ((L * h).isApprox(b, expected_precision)) {
Homography3DNormalizedParameterization<double>::To(h, H);
return true;
} else {
return false;
}
}
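// Illustrative sketch, not from the original file: the linear 3D solver
// expects 4xN homogeneous points, at least five of them non-coplanar. The
// sample data below mirrors the unit tests; the ground-truth transform is
// an arbitrary translation.
static bool Homography3DLinearSketch(Mat4 *H) {
  Mat x1(4, 5);
  x1 << 0, 0, 1, 5, 2,
        0, 1, 2, 3, 5,
        0, 2, 0, 1, 5,
        1, 1, 1, 1, 1;
  Mat4 M = Mat4::Identity();
  M(0, 3) = 2.0;  // translate along x
  Mat x2 = M * x1;
  return Homography3DFromCorrespondencesLinear(x1, x2, H);
}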
double SymmetricGeometricDistance(const Mat3 &H,
const Vec2 &x1,
const Vec2 &x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
Vec3 H_x = H * x;
Vec3 Hinv_y = H.inverse() * y;
H_x /= H_x(2);
Hinv_y /= Hinv_y(2);
return (H_x.head<2>() - y.head<2>()).squaredNorm() +
(Hinv_y.head<2>() - x.head<2>()).squaredNorm();
}
} // namespace libmv

View File

@@ -1,145 +0,0 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_H_
#define LIBMV_MULTIVIEW_HOMOGRAPHY_H_
#include "libmv/numeric/numeric.h"
namespace libmv {
/**
* 2D homography transformation estimation.
*
* This function estimates the homography transformation from a list of 2D
* correspondences which represent either:
*
* - 3D points on a plane, with a general moving camera.
* - 3D points with a rotating camera (pure rotation).
* - 3D points + different planar projections
*
* \param x1 The first 2xN or 3xN matrix of euclidean or homogeneous points.
* \param x2 The second 2xN or 3xN matrix of euclidean or homogeneous points.
* \param H The 3x3 homography transformation matrix (8 dof) such that
* x2 = H * x1 with |a b c|
* H = |d e f|
* |g h 1|
* \param expected_precision The expected precision, used for instance
* to accept matrices that are only approximately homographies.
*
* \return True if the transformation estimation has succeeded.
* \note There must be at least 4 non-collinear points.
*/
bool Homography2DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat3 *H,
double expected_precision =
EigenDouble::dummy_precision());
/**
* This structure contains options that control how the homography
* estimation operates.
*
* Defaults should be suitable for a wide range of use cases, but
* better performance and accuracy might require tweaking.
*/
struct EstimateHomographyOptions {
// Default constructor which sets up options for generic usage.
EstimateHomographyOptions(void);
// Normalize correspondences before estimating the homography
// in order to increase estimation stability.
//
// Normalization will make it so the centroid of the correspondences
// is at the coordinate origin and their average distance from
// the origin is sqrt(2).
//
// See:
// - R. Hartley and A. Zisserman. Multiple View Geometry in Computer
// Vision. Cambridge University Press, second edition, 2003.
// - https://www.cs.ubc.ca/grads/resources/thesis/May09/Dubrofsky_Elan.pdf
bool use_normalization;
// Maximal number of iterations for the refinement step.
int max_num_iterations;
// Expected average of symmetric geometric distance between
// actual destination points and original ones transformed by
// estimated homography matrix.
//
// Refinement will finish as soon as the average symmetric
// geometric distance is less than or equal to this value.
//
// This distance is measured in the same units as input points are.
double expected_average_symmetric_distance;
};
/**
* 2D homography transformation estimation.
*
* This function estimates the homography transformation from a list of 2D
* correspondences by doing algebraic estimation first, followed by result
* refinement.
*/
bool EstimateHomography2DFromCorrespondences(
const Mat &x1,
const Mat &x2,
const EstimateHomographyOptions &options,
Mat3 *H);
/**
* 3D Homography transformation estimation.
*
* This function can be used in order to estimate the homography transformation
* from a list of 3D correspondences.
*
* \param[in] x1 The first 4xN matrix of homogeneous points
* \param[in] x2 The second 4xN matrix of homogeneous points
* \param[out] H The 4x4 homography transformation matrix (15 dof) such that
* x2 = H * x1 with |a b c d|
* H = |e f g h|
* |i j k l|
* |m n o 1|
* \param[in] expected_precision The expected precision, used for instance
* to accept matrices that are only approximately homographies.
*
* \return true if the transformation estimation has succeeded
*
* \note Need at least 5 non-coplanar points
* \note Point coordinates must be homogeneous
*/
bool Homography3DFromCorrespondencesLinear(const Mat &x1,
const Mat &x2,
Mat4 *H,
double expected_precision =
EigenDouble::dummy_precision());
/**
* Calculate symmetric geometric cost:
*
* D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
*/
double SymmetricGeometricDistance(const Mat3 &H,
const Vec2 &x1,
const Vec2 &x2);
} // namespace libmv
#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_H_

View File

@@ -1,248 +0,0 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_
#define LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_
#include "libmv/multiview/projection.h"
namespace libmv {
namespace homography {
namespace homography2D {
/**
* Structure for estimating the asymmetric error between a vector x2 and the
* transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 2.
*/
struct AsymmetricError {
/**
* Computes the asymmetric residuals between a set of 2D points x2 and the
* transformed 2D point set x1 such that
* Residuals_i = x2_i - Psi(H * x1_i)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 2xN matrix of column vectors of residual errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat2X *dx) {
dx->resize(2, x1.cols());
Mat3X x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Mat2X>(x1));
else
x2h_est = H * x1;
dx->row(0) = x2h_est.row(0).array() / x2h_est.row(2).array();
dx->row(1) = x2h_est.row(1).array() / x2h_est.row(2).array();
if (x2.rows() == 2)
*dx = x2 - *dx;
else
*dx = HomogeneousToEuclidean(static_cast<Mat3X>(x2)) - *dx;
}
/**
* Computes the asymmetric residuals between a 2D point x2 and the transformed
* 2D point x1 such that
* Residuals = x2 - Psi(H * x1)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 2 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec2 *dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
else
x2h_est = H * x1;
if (x2.rows() == 2)
*dx = x2 - x2h_est.head<2>() / x2h_est[2];
else
*dx = HomogeneousToEuclidean(static_cast<Vec3>(x2)) -
x2h_est.head<2>() / x2h_est[2];
}
/**
* Computes the squared norm of the residuals between a set of 2D points x2
* and the transformed 2D point set x1 such that
* Error = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residual errors
*/
static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
Mat2X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
* Computes the squared norm of the residuals between a 2D point x2 and the
* transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
Vec2 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
};
/**
* Structure for estimating the symmetric error
* between a vector x2 and the transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
* \note It should be distributed as Chi-squared with k = 4.
*/
struct SymmetricError {
/**
* Computes the squared norm of the residuals between x2 and the
* transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the symmetric residual errors
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
// TODO(keir): This is awesomely inefficient because it does a 3x3
// inversion for each evaluation.
Mat3 Hinv = H.inverse();
return AsymmetricError::Error(H, x1, x2) +
AsymmetricError::Error(Hinv, x2, x1);
}
// TODO(julien) Add residuals function \see AsymmetricError
};
/**
* Structure for estimating the algebraic error (cross product)
* between a vector x2 and the transformed x1 such that
* Error = ||[x2] * H * x1||^2
* where [x2] is the skew matrix of x2.
*/
struct AlgebraicError {
// TODO(julien) Make an AlgebraicError2Rows and AlgebraicError3Rows
/**
* Computes the algebraic residuals (cross product) between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 3xN matrix of column vectors of residual errors
*/
static void Residuals(const Mat &H, const Mat &x1,
const Mat &x2, Mat3X *dx) {
dx->resize(3, x1.cols());
Vec3 col;
for (int i = 0; i < x1.cols(); ++i) {
Residuals(H, x1.col(i), x2.col(i), &col);
dx->col(i) = col;
}
}
/**
* Computes the algebraic residuals (cross product) between a 2D point x2
* and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 3 of the residual error
*/
static void Residuals(const Mat &H, const Vec &x1,
const Vec &x2, Vec3 *dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
else
x2h_est = H * x1;
if (x2.rows() == 2)
*dx = SkewMat(EuclideanToHomogeneous(static_cast<Vec2>(x2))) * x2h_est;
else
*dx = SkewMat(x2) * x2h_est;
// TODO(julien) This is inefficient since it creates an
// identical 3x3 skew matrix for each evaluation.
}
/**
* Computes the squared norm of the algebraic residuals between a set of 2D
* points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residual errors
*/
static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
Mat3X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
* Computes the squared norm of the algebraic residuals between a 2D point x2
* and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
* \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
Vec3 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
};
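// Illustrative sketch, not part of the original header: the three error
// structures above expose the same per-correspondence Error() shape, so a
// caller can accumulate any of them over a match set through a template
// parameter. The helper name is hypothetical.
template <typename ErrorMetric>
inline double TotalError(const Mat &H, const Mat &x1, const Mat &x2) {
  double total = 0.0;
  for (int i = 0; i < x1.cols(); ++i) {
    // E.g. ErrorMetric = AsymmetricError, SymmetricError or AlgebraicError.
    total += ErrorMetric::Error(H, x1.col(i), x2.col(i));
  }
  return total;
}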
// TODO(keir): Add error based on ideal points.
} // namespace homography2D
// TODO(julien) add homography3D errors
} // namespace homography
} // namespace libmv
#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_

View File

@@ -1,91 +0,0 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_
#define LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_
#include "libmv/numeric/numeric.h"
namespace libmv {
/** A parameterization of the 2D homography matrix that uses 8 parameters so
* that the matrix is normalized (H(2,2) == 1).
* The homography matrix H is built from a list of 8 parameters (a, b,...g, h)
* as follows
* |a b c|
* H = |d e f|
* |g h 1|
*/
template<typename T = double>
class Homography2DNormalizedParameterization {
public:
typedef Eigen::Matrix<T, 8, 1> Parameters; // a, b, ... g, h
typedef Eigen::Matrix<T, 3, 3> Parameterized; // H
/// Convert from the 8 parameters to a H matrix.
static void To(const Parameters &p, Parameterized *h) {
*h << p(0), p(1), p(2),
p(3), p(4), p(5),
p(6), p(7), 1.0;
}
/// Convert from a H matrix to the 8 parameters.
static void From(const Parameterized &h, Parameters *p) {
*p << h(0, 0), h(0, 1), h(0, 2),
h(1, 0), h(1, 1), h(1, 2),
h(2, 0), h(2, 1);
}
};
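// Illustrative sketch, not part of the original header: round-tripping
// between the 8-vector of parameters and the normalized 3x3 matrix. The
// values are arbitrary sample data.
inline void Homography2DParameterizationSketch() {
  Homography2DNormalizedParameterization<double>::Parameters p;
  p << 1.0, 0.0,  2.0,
       0.0, 1.0, -3.0,
       0.0, 0.0;                    // a, b, c, d, e, f, g, h
  Mat3 H;
  Homography2DNormalizedParameterization<double>::To(p, &H);    // H(2, 2) == 1
  Homography2DNormalizedParameterization<double>::From(H, &p);  // recovers p
}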
/** A parameterization of the 2D homography matrix that uses 15 parameters so
* that the matrix is normalized (H(3,3) == 1).
* The homography matrix H is built from a list of 15 parameters (a, b,...n, o)
* as follows
* |a b c d|
* H = |e f g h|
* |i j k l|
* |m n o 1|
*/
template<typename T = double>
class Homography3DNormalizedParameterization {
public:
typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
/// Convert from the 15 parameters to a H matrix.
static void To(const Parameters &p, Parameterized *h) {
*h << p(0), p(1), p(2), p(3),
p(4), p(5), p(6), p(7),
p(8), p(9), p(10), p(11),
p(12), p(13), p(14), 1.0;
}
/// Convert from a H matrix to the 15 parameters.
static void From(const Parameterized &h, Parameters *p) {
*p << h(0, 0), h(0, 1), h(0, 2), h(0, 3),
h(1, 0), h(1, 1), h(1, 2), h(1, 3),
h(2, 0), h(2, 1), h(2, 2), h(2, 3),
h(3, 0), h(3, 1), h(3, 2);
}
};
} // namespace libmv
#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_

View File

@@ -1,261 +0,0 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/homography.h"
namespace {
using namespace libmv;
namespace {
// Check whether homography transform M actually transforms
// given vectors x1 to x2. Used to check validness of a reconstructed
// homography matrix.
// TODO(sergey): Consider using this in all tests since the homography
// matrix is not fixed to a single value and different-looking matrices
// might actually correspond to the same exact transform.
void CheckHomography2DTransform(const Mat3 &H,
const Mat &x1,
const Mat &x2) {
for (int i = 0; i < x2.cols(); ++i) {
Vec3 x2_expected = x2.col(i);
Vec3 x2_observed = H * x1.col(i);
x2_observed /= x2_observed(2);
EXPECT_MATRIX_NEAR(x2_expected, x2_observed, 1e-8);
}
}
} // namespace
TEST(Homography2DTest, Rotation45AndTranslationXY) {
Mat x1(3, 4);
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
double angle = 45.0;
Mat3 m;
m << cos(angle), -sin(angle), -2,
sin(angle), cos(angle), 5,
0, 0, 1;
Mat x2 = x1;
// Transform points with the ground truth matrix.
for (int i = 0; i < x2.cols(); ++i)
x2.col(i) = m * x1.col(i);
Mat3 homography_mat;
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography2D ";
VLOG(1) << homography_mat;
VLOG(1) << "Mat GT ";
VLOG(1) << m;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
}
TEST(Homography2DTest, AffineGeneral4) {
// TODO(julien) find why it doesn't work with 4 points!!!
Mat x1(3, 4);
x1 << 0, 1, 0, 2,
0, 0, 1, 2,
1, 1, 1, 1;
Mat3 m;
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
x2.col(i) = m * x1.col(i);
}
Mat3 homography_mat;
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography2D";
VLOG(1) << homography_mat;
CheckHomography2DTransform(homography_mat, x1, x2);
// Test with euclidean coordinates
Mat eX1, eX2;
HomogeneousToEuclidean(x1, &eX1);
HomogeneousToEuclidean(x2, &eX2);
homography_mat.setIdentity();
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(eX1, eX2, &homography_mat));
VLOG(1) << "Mat Homography2D ";
VLOG(1) << homography_mat;
CheckHomography2DTransform(homography_mat, x1, x2);
}
TEST(Homography2DTest, AffineGeneral5) {
Mat x1(3, 5);
x1 << 0, 1, 0, 2, 5,
0, 0, 1, 2, 2,
1, 1, 1, 1, 1;
Mat3 m;
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
x2.col(i) = m * x1.col(i);
Mat3 homography_mat;
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography2D ";
VLOG(1) << homography_mat;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
// Test with euclidean coordinates
Mat eX1, eX2;
HomogeneousToEuclidean(x1, &eX1);
HomogeneousToEuclidean(x2, &eX2);
homography_mat.setIdentity();
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(eX1, eX2, &homography_mat));
VLOG(1) << "Mat Homography2D ";
VLOG(1) << homography_mat;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
}
TEST(Homography2DTest, HomographyGeneral) {
Mat x1(3, 4);
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
Mat3 m;
m << 3, -1, 4,
6, -2, -3,
1, -3, 1;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
x2.col(i) = m * x1.col(i);
Mat3 homography_mat;
EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography2D ";
VLOG(1) << homography_mat;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
}
TEST(Homography3DTest, RotationAndTranslationXYZ) {
Mat x1(4, 5);
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
Mat4 M;
M.setIdentity();
/*
M = AngleAxisd(45.0, Vector3f::UnitZ())
* AngleAxisd(25.0, Vector3f::UnitX())
* AngleAxisd(5.0, Vector3f::UnitZ());*/
// Rotation on x + translation
double angle = 45.0;
Mat4 rot;
rot << 1, 0, 0, 1,
0, cos(angle), -sin(angle), 3,
0, sin(angle), cos(angle), -2,
0, 0, 0, 1;
M *= rot;
// Rotation on y
angle = 25.0;
rot << cos(angle), 0, sin(angle), 0,
0, 1, 0, 0,
-sin(angle), 0, cos(angle), 0,
0, 0, 0, 1;
M *= rot;
// Rotation on z
angle = 5.0;
rot << cos(angle), -sin(angle), 0, 0,
sin(angle), cos(angle), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1;
M *= rot;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
x2.col(i) = M * x1.col(i);
}
Mat4 homography_mat;
EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography3D " << homography_mat;
VLOG(1) << "Mat GT " << M;
EXPECT_MATRIX_NEAR(homography_mat, M, 1e-8);
}
TEST(Homography3DTest, AffineGeneral) {
Mat x1(4, 5);
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
Mat4 m;
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
0, 0, 0, 1;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
x2.col(i) = m * x1.col(i);
}
Mat4 homography_mat;
EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography3D ";
VLOG(1) << homography_mat;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
}
TEST(Homography3DTest, HomographyGeneral) {
Mat x1(4, 5);
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
Mat4 m;
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
-3, 1, 0, 1;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
x2.col(i) = m * x1.col(i);
}
Mat4 homography_mat;
EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
VLOG(1) << "Mat Homography3D";
VLOG(1) << homography_mat;
EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
}
} // namespace

View File

@@ -1,80 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Compute a 3D position of a point from several images of it. In particular,
// compute the projective point X in R^4 such that x = PX.
//
// Algorithm is the standard DLT; for derivation see appendix of Keir's thesis.
#ifndef LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
#define LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
// output, X, is a homogeneous four-vector.
template<typename T>
void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
const vector<Matrix<T, 3, 4> > &Ps,
Matrix<T, 4, 1> *X) {
int nviews = x.cols();
assert(nviews == Ps.size());
Matrix<T, Dynamic, Dynamic> design(3*nviews, 4 + nviews);
design.setConstant(0.0);
for (int i = 0; i < nviews; i++) {
design.template block<3, 4>(3*i, 0) = -Ps[i];
design(3*i + 0, 4 + i) = x(0, i);
design(3*i + 1, 4 + i) = x(1, i);
design(3*i + 2, 4 + i) = 1.0;
}
Matrix<T, Dynamic, 1> X_and_alphas;
Nullspace(&design, &X_and_alphas);
X->resize(4);
*X = X_and_alphas.head(4);
}
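// Illustrative sketch, not part of the original header: triangulating one
// point observed in several views and converting the homogeneous result to
// euclidean coordinates. The helper name is hypothetical.
inline Vec3 TriangulatePointSketch(const Mat2X &xs,
                                   const vector<Mat34> &Ps) {
  Vec4 X;
  NViewTriangulate(xs, Ps, &X);
  return X.head<3>() / X(3);  // dehomogenize
}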
// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
// output, X, is a homogeneous four-vector.
// This method uses the algebraic distance approximation.
// Note that this method works better when the 2D points are normalized
// with an isotropic normalization.
template<typename T>
void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic> &x,
const vector<Matrix<T, 3, 4> > &Ps,
Matrix<T, 4, 1> *X) {
int nviews = x.cols();
assert(nviews == Ps.size());
Matrix<T, Dynamic, 4> design(2*nviews, 4);
for (int i = 0; i < nviews; i++) {
design.template block<2, 4>(2*i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
}
X->resize(4);
Nullspace(&design, X);
}
} // namespace libmv
#endif  // LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H

View File

@@ -1,94 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/nviewtriangulation.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
TEST(NViewTriangulate, FiveViews) {
int nviews = 5;
int npoints = 6;
NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
// Collect P matrices together.
vector<Mat34> Ps(nviews);
for (int j = 0; j < nviews; ++j) {
Ps[j] = d.P(j);
}
for (int i = 0; i < npoints; ++i) {
// Collect the image of point i in each frame.
Mat2X xs(2, nviews);
for (int j = 0; j < nviews; ++j) {
xs.col(j) = d.x[j].col(i);
}
Vec4 X;
NViewTriangulate(xs, Ps, &X);
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
Vec3 x_reprojected = Ps[j]*X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head(2) - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
}
}
}
TEST(NViewTriangulateAlgebraic, FiveViews) {
int nviews = 5;
int npoints = 6;
NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
// Collect P matrices together.
vector<Mat34> Ps(nviews);
for (int j = 0; j < nviews; ++j) {
Ps[j] = d.P(j);
}
for (int i = 0; i < npoints; ++i) {
// Collect the image of point i in each frame.
Mat2X xs(2, nviews);
for (int j = 0; j < nviews; ++j) {
xs.col(j) = d.x[j].col(i);
}
Vec4 X;
NViewTriangulate(xs, Ps, &X);
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
Vec3 x_reprojected = Ps[j]*X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head<2>() - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
}
}
}
} // namespace

View File

@@ -1,125 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
#include "libmv/multiview/panography.h"
namespace libmv {
static bool Build_Minimal2Point_PolynomialFactor(
const Mat & x1, const Mat & x2,
double * P) { // P must be a double[4]
assert(2 == x1.rows());
assert(2 == x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Setup the variable of the input problem:
Vec xx1 = (x1.col(0)).transpose();
Vec yx1 = (x1.col(1)).transpose();
double a12 = xx1.dot(yx1);
Vec xx2 = (x2.col(0)).transpose();
Vec yx2 = (x2.col(1)).transpose();
double b12 = xx2.dot(yx2);
double a1 = xx1.squaredNorm();
double a2 = yx1.squaredNorm();
double b1 = xx2.squaredNorm();
double b2 = yx2.squaredNorm();
// Build the 3rd degree polynomial in f^2.
//
// f^6 * p + f^4 * q + f^2 * r + s = 0;
//
// Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
// Run panography_coeffs.py to get the below coefficients.
P[0] = b1*b2*a12*a12-a1*a2*b12*b12;
P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
P[3] = b1+b2-2*b12-a1-a2+2*a12;
// If P[3] is equal to 0 we get ill-conditioned data
return (P[3] != 0.0);
}
// This implements a minimal solution (2 points) for panoramic stitching:
//
// http://www.cs.ubc.ca/~mbrown/minimal/minimal.html
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
vector<double> *fs) {
// Build Polynomial factor to get squared focal value.
double P[4];
Build_Minimal2Point_PolynomialFactor(x1, x2, &P[0]);
// Solve it by using F = f^2 and a Cubic polynomial solver
//
// F^3 * p + F^2 * q + F^1 * r + s = 0
//
double roots[3];
int num_roots = SolveCubicPolynomial(P, roots);
for (int i = 0; i < num_roots; ++i) {
if (roots[i] > 0.0) {
fs->push_back(sqrt(roots[i]));
}
}
}
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
// square sense. The method is from:
//
// K. Arun, T. Huang and S. Blostein. Least-squares fitting of two 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
const double focal,
Mat3 *R) {
assert(3 == x1.rows());
assert(2 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Build simplified K matrix
Mat3 K(Mat3::Identity() * 1.0/focal);
K(2, 2)= 1.0;
// Build the correlation matrix; equation (22) in [1].
Mat3 C = Mat3::Zero();
for (int i = 0; i < x1.cols(); ++i) {
Mat r1i = (K * x1.col(i)).normalized();
Mat r2i = (K * x2.col(i)).normalized();
C += r2i * r1i.transpose();
}
// Solve for rotation. Equations (24) and (25) in [1].
Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
Mat3 scale = Mat3::Identity();
scale(2, 2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
? 1.0
: -1.0;
(*R) = svd.matrixU() * scale * svd.matrixV().transpose();
}
} // namespace libmv

View File

@@ -1,99 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_H
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/base/vector.h"
namespace libmv {
// This implements a minimal solution (2 points) for panoramic stitching:
//
// http://www.cs.ubc.ca/~mbrown/minimal/minimal.html
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
//
// The 2-point algorithm solves for the rotation of the camera with a single
// focal length (4 degrees of freedom).
//
// Compute from 1 to 3 possible focal lengths for 2 point correspondences.
// Suppose that the cameras share the same optical center and focal lengths:
//
// Image 1 => H*x = x' => Image 2
// x (u1j) x' (u2j)
// a (u11) a' (u21)
// b (u12) b' (u22)
//
// The return values are 1 to 3 possible values for the focal lengths such
// that:
//
// [f 0 0]
// K = [0 f 0]
// [0 0 1]
//
void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
vector<double> *fs);
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
// square sense. The method is from:
//
// K. Arun, T. Huang and S. Blostein. Least-squares fitting of two 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
//
// Given the calibration matrices K1, K2 solve for the rotation from
// corresponding image rays.
//
// R = min || X2 - R * x1 ||.
//
// In the case of panography, where both views share the same camera
// center,
//
// H = K2 * R * K1.inverse();
//
// For the full explanation, see Section 8, Solving for Rotation from [1].
//
// Parameters:
//
// x1 : Point cloud A (3D coords)
// x2 : Point cloud B (3D coords)
//
// [f 0 0]
// K1 = [0 f 0]
// [0 0 1]
//
// K2 (the same form as K1, but may have different f)
//
// Returns: A rotation matrix that minimizes
//
// R = arg min || X2 - R * x1 ||
//
void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
const double focal,
Mat3 *R);
} // namespace libmv
#endif // LIBMV_MULTIVIEW_PANOGRAPHY_H
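A sketch of how these two functions are meant to be combined, mirroring the kernel and test code later in this diff; the wrapper name is illustrative, and x1, x2 are assumed to be 2x2 matrices holding exactly two normalized correspondences (one point per column):

#include "libmv/base/vector.h"
#include "libmv/multiview/panography.h"
#include "libmv/multiview/projection.h"

// x1, x2: two matched points (one per column) from two images sharing the
// same camera center.
libmv::Mat3 HomographyFromTwoCorrespondences(const libmv::Mat &x1,
                                             const libmv::Mat &x2) {
  using namespace libmv;
  vector<double> fs;
  F_FromCorrespondance_2points(x1, x2, &fs);
  if (fs.empty()) {
    return Mat3::Identity();  // No valid focal length was found.
  }
  Mat x1h, x2h;
  EuclideanToHomogeneous(x1, &x1h);
  EuclideanToHomogeneous(x2, &x2h);
  Mat3 R;
  GetR_FixedCameraCenter(x1h, x2h, fs[0], &R);
  Mat3 K = Mat3::Identity() * fs[0];
  K(2, 2) = 1.0;
  return K * R * K.inverse();  // H maps image 1 points onto image 2.
}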

View File

@@ -1,51 +0,0 @@
// Copyright (c) 2008, 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/panography_kernel.h"
#include "libmv/multiview/panography.h"
namespace libmv {
namespace panography {
namespace kernel {
void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
// Solve for the focal lengths.
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
// Then solve for the rotations and homographies.
Mat x1h, x2h;
EuclideanToHomogeneous(x1, &x1h);
EuclideanToHomogeneous(x2, &x2h);
for (int i = 0; i < fs.size(); ++i) {
Mat3 K1 = Mat3::Identity() * fs[i];
K1(2, 2) = 1.0;
Mat3 R;
GetR_FixedCameraCenter(x1h, x2h, fs[i], &R);
R /= R(2, 2);
(*Hs).push_back(K1 * R * K1.inverse());
}
}
} // namespace kernel
} // namespace panography
} // namespace libmv

View File

@@ -1,54 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H
#include "libmv/base/vector.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/two_view_kernel.h"
#include "libmv/multiview/homography_error.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
namespace panography {
namespace kernel {
struct TwoPointSolver {
enum { MINIMUM_SAMPLES = 2 };
static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs);
};
typedef two_view::kernel::Kernel<
TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
UnnormalizedKernel;
typedef two_view::kernel::Kernel<
two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
homography::homography2D::AsymmetricError,
Mat3>
Kernel;
} // namespace kernel
} // namespace panography
} // namespace libmv
#endif // LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H

View File

@@ -1,144 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/logging/logging.h"
#include "libmv/multiview/panography.h"
#include "libmv/multiview/panography_kernel.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
namespace libmv {
namespace {
TEST(Panography, PrintSomeSharedFocalEstimationValues) {
Mat x1(2, 2), x2(2, 2);
x1<< 158, 78,
124, 113;
x2<< 300, 214,
125, 114;
// Normalize data (set principal point 0,0 and image border to 1.0).
x1.block<1, 2>(0, 0) /= 320;
x1.block<1, 2>(1, 0) /= 240;
x2.block<1, 2>(0, 0) /= 320;
x2.block<1, 2>(1, 0) /= 240;
x1+=Mat2::Constant(0.5);
x2+=Mat2::Constant(0.5);
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
// Assert we found a valid solution.
EXPECT_EQ(1, fs.size());
EXPECT_NEAR(1.01667, fs[0], 1e-3);
}
TEST(Panography, GetR_FixedCameraCenterWithIdentity) {
Mat x1(3, 3);
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10.0, 10.0, 10.0;
Mat3 R;
GetR_FixedCameraCenter(x1, x1, 1.0, &R);
R /= R(2, 2);
EXPECT_MATRIX_NEAR(Mat3::Identity(), R, 1e-8);
LOG(INFO) << "R \n" << R;
}
TEST(Panography, Homography_GetR_Test_PitchY30) {
int n = 3;
Mat x1(3, n);
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10, 10, 10;
Mat x2 = x1;
const double alpha = 30.0 * M_PI / 180.0;
Mat3 rotY;
rotY << cos(alpha), 0, -sin(alpha),
0, 1, 0,
sin(alpha), 0, cos(alpha);
for (int i = 0; i < n; ++i) {
x2.block<3, 1>(0, i) = rotY * x1.col(i);
}
Mat3 R;
GetR_FixedCameraCenter(x1, x2, 1.0, &R);
// Assert that residuals are small enough
for (int i = 0; i < n; ++i) {
Vec residuals = (R * x1.col(i)) - x2.col(i);
EXPECT_NEAR(0, residuals.norm(), 1e-6);
}
// Check that the rotation angle along Y is the expected one.
// Use the euler approximation to recover the angle.
double pitch_y = asin(R(2, 0)) * 180.0 / M_PI;
EXPECT_NEAR(30, pitch_y, 1e-4);
}
TEST(MinimalPanoramic, Real_Case_Kernel) {
const int n = 2;
Mat x1(2, n); // From image 0.jpg
x1<< 158, 78,
124, 113;
Mat x2(2, n); // From image 3.jpg
x2<< 300, 214,
125, 114;
Mat3 Ground_TruthHomography;
Ground_TruthHomography<< 1, 0.02, 129.83,
-0.02, 1.012, 0.07823,
0, 0, 1;
vector<Mat3> Hs;
libmv::panography::kernel::TwoPointSolver::Solve(x1, x2, &Hs);
LOG(INFO) << "Got " << Hs.size() << " solutions.";
for (int j = 0; j < Hs.size(); ++j) {
Mat3 H = Hs[j];
EXPECT_MATRIX_NEAR(H, Ground_TruthHomography, 1e-1);
Mat x1h, x2h;
EuclideanToHomogeneous(x1, &x1h);
EuclideanToHomogeneous(x2, &x2h);
// Assert that residuals are small enough
for (int i = 0; i < n; ++i) {
Vec x1p = H * x1h.col(i);
Vec residuals = x1p/x1p(2) - x2h.col(i);
EXPECT_MATRIX_NEAR_ZERO(residuals, 1e-5);
}
}
}
} // namespace
} // namespace libmv
// TODO(pmoulon): Add a real test case based on images.
// TODO(pmoulon): Add a check for the actual f value for the real images.
// TODO(pmoulon): Add a test that has some inliers and outliers.

View File

@@ -1,224 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P) {
P->block<3, 3>(0, 0) = R;
P->col(3) = t;
(*P) = K * (*P);
}
void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
// Decompose using the RQ decomposition HZ A4.1.1 pag.579.
Mat3 K = P.block(0, 0, 3, 3);
Mat3 Q;
Q.setIdentity();
// Set K(2,1) to zero.
if (K(2, 1) != 0) {
double c = -K(2, 2);
double s = K(2, 1);
double l = sqrt(c * c + s * s);
c /= l;
s /= l;
Mat3 Qx;
Qx << 1, 0, 0,
0, c, -s,
0, s, c;
K = K * Qx;
Q = Qx.transpose() * Q;
}
// Set K(2,0) to zero.
if (K(2, 0) != 0) {
double c = K(2, 2);
double s = K(2, 0);
double l = sqrt(c * c + s * s);
c /= l;
s /= l;
Mat3 Qy;
Qy << c, 0, s,
0, 1, 0,
-s, 0, c;
K = K * Qy;
Q = Qy.transpose() * Q;
}
// Set K(1,0) to zero.
if (K(1, 0) != 0) {
double c = -K(1, 1);
double s = K(1, 0);
double l = sqrt(c * c + s * s);
c /= l;
s /= l;
Mat3 Qz;
Qz << c, -s, 0,
s, c, 0,
0, 0, 1;
K = K * Qz;
Q = Qz.transpose() * Q;
}
Mat3 R = Q;
// Ensure that the diagonal is positive.
// TODO(pau) Change this to ensure that:
// - K(0,0) > 0
// - K(2,2) = 1
// - det(R) = 1
if (K(2, 2) < 0) {
K = -K;
R = -R;
}
if (K(1, 1) < 0) {
Mat3 S;
S << 1, 0, 0,
0, -1, 0,
0, 0, 1;
K = K * S;
R = S * R;
}
if (K(0, 0) < 0) {
Mat3 S;
S << -1, 0, 0,
0, 1, 0,
0, 0, 1;
K = K * S;
R = S * R;
}
// Compute translation.
Vec p(3);
p << P(0, 3), P(1, 3), P(2, 3);
// TODO(pau) This should be done by a SolveLinearSystem(A, b, &x) call.
// TODO(keir) use the eigen LU solver syntax...
Vec3 t = K.inverse() * p;
// scale K so that K(2,2) = 1
K = K / K(2, 2);
*Kp = K;
*Rp = R;
*tp = t;
}
void ProjectionShiftPrincipalPoint(const Mat34 &P,
const Vec2 &principal_point,
const Vec2 &principal_point_new,
Mat34 *P_new) {
Mat3 T;
T << 1, 0, principal_point_new(0) - principal_point(0),
0, 1, principal_point_new(1) - principal_point(1),
0, 0, 1;
*P_new = T * P;
}
void ProjectionChangeAspectRatio(const Mat34 &P,
const Vec2 &principal_point,
double aspect_ratio,
double aspect_ratio_new,
Mat34 *P_new) {
Mat3 T;
T << 1, 0, 0,
0, aspect_ratio_new / aspect_ratio, 0,
0, 0, 1;
Mat34 P_temp;
ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0, 0), &P_temp);
P_temp = T * P_temp;
ProjectionShiftPrincipalPoint(P_temp, Vec2(0, 0), principal_point, P_new);
}
void HomogeneousToEuclidean(const Mat &H, Mat *X) {
int d = H.rows() - 1;
int n = H.cols();
X->resize(d, n);
for (size_t i = 0; i < n; ++i) {
double h = H(d, i);
for (int j = 0; j < d; ++j) {
(*X)(j, i) = H(j, i) / h;
}
}
}
void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e) {
e->resize(2, h.cols());
e->row(0) = h.row(0).array() / h.row(2).array();
e->row(1) = h.row(1).array() / h.row(2).array();
}
void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e) {
e->resize(3, h.cols());
e->row(0) = h.row(0).array() / h.row(3).array();
e->row(1) = h.row(1).array() / h.row(3).array();
e->row(2) = h.row(2).array() / h.row(3).array();
}
void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X) {
double w = H(2);
*X << H(0) / w, H(1) / w;
}
void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X) {
double w = H(3);
*X << H(0) / w, H(1) / w, H(2) / w;
}
void EuclideanToHomogeneous(const Mat &X, Mat *H) {
int d = X.rows();
int n = X.cols();
H->resize(d + 1, n);
H->block(0, 0, d, n) = X;
H->row(d).setOnes();
}
void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H) {
*H << X(0), X(1), 1;
}
void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H) {
*H << X(0), X(1), X(2), 1;
}
// TODO(julien) Call conditioning.h/ApplyTransformationToPoints ?
void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
Mat3X x_image_h;
EuclideanToHomogeneous(x, &x_image_h);
Mat3X x_camera_h = K.inverse() * x_image_h;
HomogeneousToEuclidean(x_camera_h, n);
}
void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
Mat3X x_camera_h = K.inverse() * x;
HomogeneousToEuclidean(x_camera_h, n);
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X) {
return (R*X)(2) + t(2);
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X) {
Vec3 Xe = X.head<3>() / X(3);
return Depth(R, t, Xe);
}
} // namespace libmv

View File

@@ -1,231 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_PROJECTION_H_
#define LIBMV_MULTIVIEW_PROJECTION_H_
#include "libmv/numeric/numeric.h"
namespace libmv {
void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P);
void KRt_From_P(const Mat34 &P, Mat3 *K, Mat3 *R, Vec3 *t);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the principal point becomes principal_point_new.
void ProjectionShiftPrincipalPoint(const Mat34 &P,
const Vec2 &principal_point,
const Vec2 &principal_point_new,
Mat34 *P_new);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the aspect ratio becomes aspect_ratio_new. This is done by
// stretching the y axis. The aspect ratio is defined as the quotient between
// the focal length of the y and the x axis.
void ProjectionChangeAspectRatio(const Mat34 &P,
const Vec2 &principal_point,
double aspect_ratio,
double aspect_ratio_new,
Mat34 *P_new);
void HomogeneousToEuclidean(const Mat &H, Mat *X);
void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e);
void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e);
void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X);
void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X);
inline Vec2 HomogeneousToEuclidean(const Vec3 &h) {
return h.head<2>() / h(2);
}
inline Vec3 HomogeneousToEuclidean(const Vec4 &h) {
return h.head<3>() / h(3);
}
inline Mat2X HomogeneousToEuclidean(const Mat3X &h) {
Mat2X e(2, h.cols());
e.row(0) = h.row(0).array() / h.row(2).array();
e.row(1) = h.row(1).array() / h.row(2).array();
return e;
}
void EuclideanToHomogeneous(const Mat &X, Mat *H);
inline Mat3X EuclideanToHomogeneous(const Mat2X &x) {
Mat3X h(3, x.cols());
h.block(0, 0, 2, x.cols()) = x;
h.row(2).setOnes();
return h;
}
inline void EuclideanToHomogeneous(const Mat2X &x, Mat3X *h) {
h->resize(3, x.cols());
h->block(0, 0, 2, x.cols()) = x;
h->row(2).setOnes();
}
inline Mat4X EuclideanToHomogeneous(const Mat3X &x) {
Mat4X h(4, x.cols());
h.block(0, 0, 3, x.cols()) = x;
h.row(3).setOnes();
return h;
}
inline void EuclideanToHomogeneous(const Mat3X &x, Mat4X *h) {
h->resize(4, x.cols());
h->block(0, 0, 3, x.cols()) = x;
h->row(3).setOnes();
}
void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H);
void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H);
inline Vec3 EuclideanToHomogeneous(const Vec2 &x) {
return Vec3(x(0), x(1), 1);
}
inline Vec4 EuclideanToHomogeneous(const Vec3 &x) {
return Vec4(x(0), x(1), x(2), 1);
}
// Conversion from image coordinates to normalized camera coordinates
void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n);
void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n);
inline Vec2 Project(const Mat34 &P, const Vec3 &X) {
Vec4 HX;
HX << X, 1.0;
Vec3 hx = P * HX;
return hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Vec4 &X, Vec3 *x) {
*x = P * X;
}
inline void Project(const Mat34 &P, const Vec4 &X, Vec2 *x) {
Vec3 hx = P * X;
*x = hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Vec3 &X, Vec3 *x) {
Vec4 HX;
HX << X, 1.0;
Project(P, HX, x);
}
inline void Project(const Mat34 &P, const Vec3 &X, Vec2 *x) {
Vec3 hx;
Project(P, X, &hx);
*x = hx.head<2>() / hx(2);
}
inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec3 hx = P * X.col(c);
x->col(c) = hx.head<2>() / hx(2);
}
}
inline Mat2X Project(const Mat34 &P, const Mat4X &X) {
Mat2X x;
Project(P, X, &x);
return x;
}
inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec4 HX;
HX << X.col(c), 1.0;
Vec3 hx = P * HX;
x->col(c) = hx.head<2>() / hx(2);
}
}
inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
x->resize(2, ids.size());
Vec4 HX;
Vec3 hx;
for (int c = 0; c < ids.size(); ++c) {
HX << X.col(ids[c]), 1.0;
hx = P * HX;
x->col(c) = hx.head<2>() / hx(2);
}
}
inline Mat2X Project(const Mat34 &P, const Mat3X &X) {
Mat2X x(2, X.cols());
Project(P, X, &x);
return x;
}
inline Mat2X Project(const Mat34 &P, const Mat3X &X, const Vecu &ids) {
Mat2X x(2, ids.size());
Project(P, X, ids, &x);
return x;
}
double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X);
double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X);
/**
 * Returns true if the homogeneous 3D point X is in front of
* the camera P.
*/
inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
double condition_1 = P.row(2).dot(X) * X[3];
double condition_2 = X[2] * X[3];
if (condition_1 > 0 && condition_2 > 0)
return true;
else
return false;
}
inline bool isInFrontOfCamera(const Mat34 &P, const Vec3 &X) {
Vec4 X_homo;
X_homo.segment<3>(0) = X;
X_homo(3) = 1;
return isInFrontOfCamera( P, X_homo);
}
/**
* Transforms a 2D point from pixel image coordinates to a 2D point in
* normalized image coordinates.
*/
inline Vec2 ImageToNormImageCoordinates(Mat3 &Kinverse, Vec2 &x) {
Vec3 x_h = Kinverse*EuclideanToHomogeneous(x);
return HomogeneousToEuclidean( x_h );
}
/// Estimates the root mean square error (2D)
inline double RootMeanSquareError(const Mat2X &x_image,
const Mat4X &X_world,
const Mat34 &P) {
size_t num_points = x_image.cols();
Mat2X dx = Project(P, X_world) - x_image;
return dx.norm() / num_points;
}
/// Estimates the root mean square error (2D)
inline double RootMeanSquareError(const Mat2X &x_image,
const Mat3X &X_world,
const Mat3 &K,
const Mat3 &R,
const Vec3 &t) {
Mat34 P;
P_From_KRt(K, R, t, &P);
size_t num_points = x_image.cols();
Mat2X dx = Project(P, X_world) - x_image;
return dx.norm() / num_points;
}
} // namespace libmv
#endif // LIBMV_MULTIVIEW_PROJECTION_H_
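A short sketch exercising a few of the helpers declared above; the function name, intrinsics and scene point are hypothetical values:

#include "libmv/multiview/projection.h"

void ProjectionHelpersSketch() {
  using namespace libmv;
  Mat3 K, R;
  Vec3 t;
  K << 800,   0, 320,
         0, 800, 240,
         0,   0,   1;             // Hypothetical intrinsics.
  R = Mat3::Identity();
  t << 0.0, 0.0, 5.0;
  Mat34 P;
  P_From_KRt(K, R, t, &P);
  Vec3 X(0.1, -0.2, 1.0);         // A scene point in front of the camera.
  Vec2 x = Project(P, X);         // Pixel coordinates of X.
  bool in_front = isInFrontOfCamera(P, X);
  double depth = Depth(R, t, X);  // Positive for points in front.
  Vec4 X_h = EuclideanToHomogeneous(X);
  Vec3 X_back = HomogeneousToEuclidean(X_h);  // Round trip: X_back == X.
  (void)x; (void)in_front; (void)depth; (void)X_back;
}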

View File

@@ -1,115 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
TEST(Projection, P_From_KRt) {
Mat3 K, Kp;
K << 10, 1, 30,
0, 20, 40,
0, 0, 1;
Mat3 R, Rp;
R << 1, 0, 0,
0, 1, 0,
0, 0, 1;
Vec3 t, tp;
t << 1, 2, 3;
Mat34 P;
P_From_KRt(K, R, t, &P);
KRt_From_P(P, &Kp, &Rp, &tp);
EXPECT_MATRIX_NEAR(K, Kp, 1e-8);
EXPECT_MATRIX_NEAR(R, Rp, 1e-8);
EXPECT_MATRIX_NEAR(t, tp, 1e-8);
// TODO(keir): Change the code to ensure det(R) == 1, which is not currently
// the case. Also add a test for that here.
}
Vec4 GetRandomPoint() {
Vec4 X;
X.setRandom();
X(3) = 1;
return X;
}
TEST(Projection, isInFrontOfCamera) {
Mat34 P;
P << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
Vec4 X_front = GetRandomPoint();
Vec4 X_back = GetRandomPoint();
X_front(2) = 10; // Any point in the positive Z direction
// (Z > 0) is in front of the camera.
X_back(2) = -10; // Any point in the negative Z direction
// is behind the camera.
bool res_front = isInFrontOfCamera(P, X_front);
bool res_back = isInFrontOfCamera(P, X_back);
EXPECT_EQ(true, res_front);
EXPECT_EQ(false, res_back);
}
TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
Mat34 P1, P2;
P1 << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
Mat34 P1_computed, P2_computed;
ProjectionShiftPrincipalPoint(P1, Vec2(0, 0), Vec2(3, 4), &P2_computed);
ProjectionShiftPrincipalPoint(P2, Vec2(3, 4), Vec2(0, 0), &P1_computed);
EXPECT_MATRIX_EQ(P1, P1_computed);
EXPECT_MATRIX_EQ(P2, P2_computed);
}
TEST(AutoCalibration, ProjectionChangeAspectRatio) {
Mat34 P1, P2;
P1 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 2, 4, 0,
0, 0, 1, 0;
Mat34 P1_computed, P2_computed;
ProjectionChangeAspectRatio(P1, Vec2(3, 4), 1, 2, &P2_computed);
ProjectionChangeAspectRatio(P2, Vec2(3, 4), 2, 1, &P1_computed);
EXPECT_MATRIX_EQ(P1, P1_computed);
EXPECT_MATRIX_EQ(P2, P2_computed);
}
} // namespace

View File

@@ -1,62 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Compute the projection matrix from a set of 3D points X and their
// projections x = PX in 2D. This is useful if a point cloud is reconstructed.
//
// Algorithm is the standard DLT as described in Hartley & Zisserman, page 179.
#ifndef LIBMV_MULTIVIEW_RESECTION_H
#define LIBMV_MULTIVIEW_RESECTION_H
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
namespace resection {
// x's are 2D image coordinates, (x,y,1), and X's are homogeneous four-vectors.
template<typename T>
void Resection(const Matrix<T, 2, Dynamic> &x,
const Matrix<T, 4, Dynamic> &X,
Matrix<T, 3, 4> *P) {
int N = x.cols();
assert(X.cols() == N);
Matrix<T, Dynamic, 12> design(2*N, 12);
design.setZero();
for (int i = 0; i < N; i++) {
T xi = x(0, i);
T yi = x(1, i);
// See equation (7.2) on page 179 of H&Z.
design.template block<1, 4>(2*i, 4) = -X.col(i).transpose();
design.template block<1, 4>(2*i, 8) = yi*X.col(i).transpose();
design.template block<1, 4>(2*i + 1, 0) = X.col(i).transpose();
design.template block<1, 4>(2*i + 1, 8) = -xi*X.col(i).transpose();
}
Matrix<T, 12, 1> p;
Nullspace(&design, &p);
reshape(p, 3, 4, P);
}
} // namespace resection
} // namespace libmv
#endif // LIBMV_MULTIVIEW_RESECTION_H
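A usage sketch for the function above; the wrapper name is illustrative, and x_image and X_world are assumed to hold at least six 2D-3D correspondences, since the 12-unknown DLT system needs 2N >= 11 independent equations:

#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"

// x_image: 2xN image points; X_world: 3xN Euclidean scene points, N >= 6.
void ResectCamera(const libmv::Mat2X &x_image,
                  const libmv::Mat3X &X_world,
                  libmv::Mat34 *P) {
  using namespace libmv;
  // Resection() expects homogeneous 4-vectors, so lift the Euclidean points.
  Mat4X X_h = EuclideanToHomogeneous(X_world);
  resection::Resection(x_image, X_h, P);
  // The recovered P is defined only up to scale.
}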

View File

@@ -1,61 +0,0 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
namespace {
using namespace libmv;
using namespace libmv::resection;
TEST(Resection, FiveViews) {
int nviews = 5;
int npoints = 6;
NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
for (int i = 0; i < nviews; ++i) {
Mat4X X(4, npoints);
X.block(0, 0, 3, npoints) = d.X;
X.row(3).setOnes();
const Mat2X &x = d.x[i];
Mat34 P;
Resection(x, X, &P);
Mat34 P_expected = d.P(i);
// Because the P matrices are homogeneous, it is necessary to be tricky
// about the scale factor to make them match.
P_expected *= 1/P_expected.array().abs().sum();
P *= 1/P.array().abs().sum();
if (!((P(0, 0) > 0 && P_expected(0, 0) > 0) ||
(P(0, 0) < 0 && P_expected(0, 0) < 0))) {
P *= -1;
}
EXPECT_MATRIX_NEAR(P_expected, P, 1e-9);
}
}
} // namespace

View File

@@ -1,196 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/test_data_sets.h"
#include <cmath>
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/fundamental.h"
namespace libmv {
TwoViewDataSet TwoRealisticCameras(bool same_K) {
TwoViewDataSet d;
d.K1 << 320, 0, 160,
0, 320, 120,
0, 0, 1;
if (same_K) {
d.K2 = d.K1;
} else {
d.K2 << 360, 0, 170,
0, 360, 110,
0, 0, 1;
}
d.R1 = RotationAroundZ(-0.1);
d.R2 = RotationAroundX(-0.1);
d.t1 << 1, 1, 10;
d.t2 << -2, -1, 10;
P_From_KRt(d.K1, d.R1, d.t1, &d.P1);
P_From_KRt(d.K2, d.R2, d.t2, &d.P2);
FundamentalFromProjections(d.P1, d.P2, &d.F);
d.X.resize(3, 30);
d.X.setRandom();
Project(d.P1, d.X, &d.x1);
Project(d.P2, d.X, &d.x2);
return d;
}
nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
int cx, int cy,
double distance,
double jitter_amount) {
_fx = fx;
_fy = fy;
_cx = cx;
_cy = cy;
_dist = distance;
_jitter_amount = jitter_amount;
}
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
const nViewDatasetConfigator config) {
NViewDataSet d;
d.n = nviews;
d.K.resize(nviews);
d.R.resize(nviews);
d.t.resize(nviews);
d.C.resize(nviews);
d.x.resize(nviews);
d.x_ids.resize(nviews);
d.X.resize(3, npoints);
d.X.setRandom();
d.X *= 0.6;
Vecu all_point_ids(npoints);
for (size_t j = 0; j < npoints; ++j)
all_point_ids[j] = j;
for (size_t i = 0; i < nviews; ++i) {
Vec3 camera_center, t, jitter, lookdir;
double theta = i * 2 * M_PI / nviews;
camera_center << sin(theta), 0.0, cos(theta);
camera_center *= config._dist;
d.C[i] = camera_center;
jitter.setRandom();
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
d.x[i] = Project(d.P(i), d.X);
d.x_ids[i] = all_point_ids;
}
return d;
}
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
float view_ratio, unsigned min_projections,
const nViewDatasetConfigator config) {
assert(view_ratio <= 1.0);
assert(view_ratio > 0.0);
assert(min_projections <= npoints);
NViewDataSet d;
d.n = nviews;
d.K.resize(nviews);
d.R.resize(nviews);
d.t.resize(nviews);
d.C.resize(nviews);
d.x.resize(nviews);
d.x_ids.resize(nviews);
d.X.resize(3, npoints);
d.X.setRandom();
d.X *= 0.6;
Mat visibility(nviews, npoints);
visibility.setZero();
Mat randoms(nviews, npoints);
randoms.setRandom();
randoms = (randoms.array() + 1)/2.0;
unsigned num_visibles = 0;
for (size_t i = 0; i < nviews; ++i) {
num_visibles = 0;
for (size_t j = 0; j < npoints; j++) {
if (randoms(i, j) <= view_ratio) {
visibility(i, j) = true;
num_visibles++;
}
}
if (num_visibles < min_projections) {
unsigned num_projections_to_add = min_projections - num_visibles;
for (size_t j = 0; j < npoints && num_projections_to_add > 0; ++j) {
if (!visibility(i, j)) {
// Force this projection so that the view reaches min_projections.
visibility(i, j) = true;
num_visibles++;
num_projections_to_add--;
}
}
}
d.x_ids[i].resize(num_visibles);
d.x[i].resize(2, num_visibles);
}
size_t j_visible = 0;
Vec3 X;
for (size_t i = 0; i < nviews; ++i) {
Vec3 camera_center, t, jitter, lookdir;
double theta = i * 2 * M_PI / nviews;
camera_center << sin(theta), 0.0, cos(theta);
camera_center *= config._dist;
d.C[i] = camera_center;
jitter.setRandom();
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
j_visible = 0;
for (size_t j = 0; j < npoints; j++) {
if (visibility(i, j)) {
X = d.X.col(j);
d.x[i].col(j_visible) = Project(d.P(i), X);
d.x_ids[i][j_visible] = j;
j_visible++;
}
}
}
return d;
}
} // namespace libmv

View File

@@ -1,105 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
#define LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
#include "libmv/base/vector.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
struct TwoViewDataSet {
Mat3 K1, K2; // Internal parameters.
Mat3 R1, R2; // Rotation.
Vec3 t1, t2; // Translation.
Mat34 P1, P2; // Projection matrix, P = K(R|t)
Mat3 F; // Fundamental matrix.
Mat3X X; // 3D points.
Mat2X x1, x2; // Projected points.
};
// Two cameras at (-1,-1,-10) and (2,1,-10) looking approximately towards z+.
TwoViewDataSet TwoRealisticCameras(bool same_K = false);
// An N-view metric dataset. An important difference between this
// and the other reconstruction data types is that all points are seen by all
// cameras.
struct NViewDataSet {
vector<Mat3> K; // Internal parameters (fx, fy, etc).
vector<Mat3> R; // Rotation.
vector<Vec3> t; // Translation.
vector<Vec3> C; // Camera centers.
Mat3X X; // 3D points.
vector<Mat2X> x; // Projected points; may have noise added.
vector<Vecu> x_ids; // Indexes of points corresponding to the projections
int n; // Actual number of cameras.
Mat34 P(int i) {
assert(i < n);
return K[i] * HStack(R[i], t[i]);
}
Mat3 F(int i, int j) {
Mat3 F_;
FundamentalFromProjections(P(i), P(j), &F_);
return F_;
}
void Reproject() {
for (int i = 0; i < n; ++i) {
x[i] = Project(P(i), X);
}
}
// TODO(keir): Add gaussian jitter functions.
};
struct nViewDatasetConfigator {
/// Internal camera parameters
int _fx;
int _fy;
int _cx;
int _cy;
/// Camera random position parameters
double _dist;
double _jitter_amount;
nViewDatasetConfigator(int fx = 1000, int fy = 1000,
int cx = 500, int cy = 500,
double distance = 1.5,
double jitter_amount = 0.01);
};
NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
// Generates sparse projections (not all points are projected)
NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
float view_ratio = 0.6,
unsigned min_projections = 3,
const nViewDatasetConfigator
config = nViewDatasetConfigator());
} // namespace libmv
#endif // LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
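A sketch of how these generators are typically driven; the function name and all parameter values below are arbitrary examples:

#include "libmv/multiview/test_data_sets.h"

void SyntheticDataSketch() {
  using namespace libmv;
  // Cameras on a circle of radius 2.0, 1000px focal length, 500px principal
  // point, small look-at jitter (all hypothetical values).
  nViewDatasetConfigator config(1000, 1000, 500, 500, 2.0, 0.01);
  // 8 views, 100 points, each point seen in roughly half of the views but in
  // at least 3 of them.
  NViewDataSet d = NRealisticCamerasSparse(8, 100, 0.5f, 3, config);
  Mat34 P0 = d.P(0);     // Projection matrix of the first camera.
  Mat3 F01 = d.F(0, 1);  // Fundamental matrix between views 0 and 1.
  (void)P0; (void)F01;
}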

View File

@@ -1,50 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
namespace libmv {
// HZ 12.2 pag.312
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec4 *X_homogeneous) {
Mat4 design;
for (int i = 0; i < 4; ++i) {
design(0, i) = x1(0) * P1(2, i) - P1(0, i);
design(1, i) = x1(1) * P1(2, i) - P1(1, i);
design(2, i) = x2(0) * P2(2, i) - P2(0, i);
design(3, i) = x2(1) * P2(2, i) - P2(1, i);
}
Nullspace(&design, X_homogeneous);
}
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec3 *X_euclidean) {
Vec4 X_homogeneous;
TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
HomogeneousToEuclidean(X_homogeneous, X_euclidean);
}
} // namespace libmv

View File

@@ -1,38 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_MULTIVIEW_TRIANGULATION_H_
#define LIBMV_MULTIVIEW_TRIANGULATION_H_
#include "libmv/numeric/numeric.h"
namespace libmv {
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec4 *X_homogeneous);
void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
const Mat34 &P2, const Vec2 &x2,
Vec3 *X_euclidean);
} // namespace libmv
#endif // LIBMV_MULTIVIEW_TRIANGULATION_H_
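A minimal sketch of the two-view API above; the wrapper name is illustrative, and P1, x1, P2, x2 are assumed to come from a calibrated image pair with matched observations:

#include "libmv/multiview/triangulation.h"

libmv::Vec3 TriangulatePoint(const libmv::Mat34 &P1, const libmv::Vec2 &x1,
                             const libmv::Mat34 &P2, const libmv::Vec2 &x2) {
  libmv::Vec3 X;
  libmv::TriangulateDLT(P1, x1, P2, x2, &X);
  // The Vec4 overload returns the homogeneous solution instead of the
  // Euclidean point.
  return X;
}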

View File

@@ -1,47 +0,0 @@
// Copyright (c) 2007, 2008 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <iostream>
#include "libmv/multiview/triangulation.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
namespace {
using namespace libmv;
TEST(Triangulation, TriangulateDLT) {
TwoViewDataSet d = TwoRealisticCameras();
for (int i = 0; i < d.X.cols(); ++i) {
Vec2 x1, x2;
MatrixColumn(d.x1, i, &x1);
MatrixColumn(d.x2, i, &x2);
Vec3 X_estimated, X_gt;
MatrixColumn(d.X, i, &X_gt);
TriangulateDLT(d.P1, x1, d.P2, x2, &X_estimated);
EXPECT_NEAR(0, DistanceLInfinity(X_estimated, X_gt), 1e-8);
}
}
} // namespace

Some files were not shown because too many files have changed in this diff.