Ceres: Update to the latest upstream version

Using latest master because of various compilation error fixes.

Brings a lot of recent development. Among the most interesting parts:

- New threading model.
- Tiny solver.
- Compatibility with C++17.
This commit is contained in:
Sergey Sharybin
2020-06-18 10:12:01 +02:00
parent 171c4fb238
commit 31ae833811
295 changed files with 17678 additions and 9552 deletions

View File

@@ -1245,7 +1245,7 @@ endif()
if(WITH_LIBMV)
# We always have C++11 which includes unordered_map.
set(CERES_DEFINES -DCERES_STD_UNORDERED_MAP)
set(CERES_DEFINES "-DCERES_STD_UNORDERED_MAP;-DCERES_USE_CXX_THREADS")
endif()
#-----------------------------------------------------------------------------

View File

@@ -37,223 +37,279 @@ set(INC_SYS
)
set(SRC
internal/ceres/array_utils.cc
internal/ceres/blas.cc
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_matrix.cc
internal/ceres/block_random_access_sparse_matrix.cc
internal/ceres/block_sparse_matrix.cc
internal/ceres/block_structure.cc
internal/ceres/callbacks.cc
internal/ceres/c_api.cc
internal/ceres/cgnr_solver.cc
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/corrector.cc
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
internal/ceres/dogleg_strategy.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_solver.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/is_close.cc
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/lapack.cc
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_preprocessor.cc
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/minimizer.cc
internal/ceres/normal_prior.cc
internal/ceres/parameter_block_ordering.cc
internal/ceres/partitioned_matrix_view.cc
internal/ceres/polynomial.cc
internal/ceres/preconditioner.cc
internal/ceres/preprocessor.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
internal/ceres/reorder_program.cc
internal/ceres/residual_block.cc
internal/ceres/residual_block_utils.cc
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_eliminator.cc
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/split.cc
internal/ceres/stringprintf.cc
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_preprocessor.cc
internal/ceres/trust_region_step_evaluator.cc
internal/ceres/trust_region_strategy.cc
internal/ceres/types.cc
internal/ceres/wall_time.cc
internal/ceres/accelerate_sparse.cc
internal/ceres/array_utils.cc
internal/ceres/blas.cc
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_matrix.cc
internal/ceres/block_random_access_sparse_matrix.cc
internal/ceres/block_sparse_matrix.cc
internal/ceres/block_structure.cc
internal/ceres/callbacks.cc
internal/ceres/canonical_views_clustering.cc
internal/ceres/c_api.cc
internal/ceres/cgnr_solver.cc
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/context.cc
internal/ceres/context_impl.cc
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/corrector.cc
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/cxsparse.cc
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
internal/ceres/dogleg_strategy.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
internal/ceres/eigensparse.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/float_cxsparse.cc
internal/ceres/float_suitesparse.cc
internal/ceres/function_sample.cc
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_solver.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/inner_product_computer.cc
internal/ceres/is_close.cc
internal/ceres/iterative_refiner.cc
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/lapack.cc
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_preprocessor.cc
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/minimizer.cc
internal/ceres/normal_prior.cc
internal/ceres/parallel_for_cxx.cc
internal/ceres/parallel_for_nothreads.cc
internal/ceres/parallel_for_openmp.cc
internal/ceres/parallel_utils.cc
internal/ceres/parameter_block_ordering.cc
internal/ceres/partitioned_matrix_view.cc
internal/ceres/polynomial.cc
internal/ceres/preconditioner.cc
internal/ceres/preprocessor.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
internal/ceres/reorder_program.cc
internal/ceres/residual_block.cc
internal/ceres/residual_block_utils.cc
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_eliminator.cc
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_templates.cc
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/single_linkage_clustering.cc
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/sparse_cholesky.cc
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/split.cc
internal/ceres/stringprintf.cc
internal/ceres/subset_preconditioner.cc
internal/ceres/suitesparse.cc
internal/ceres/thread_pool.cc
internal/ceres/thread_token_provider.cc
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_preprocessor.cc
internal/ceres/trust_region_step_evaluator.cc
internal/ceres/trust_region_strategy.cc
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility.cc
internal/ceres/wall_time.cc
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/fpclassify.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/macros.h
include/ceres/internal/manual_constructor.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/scoped_ptr.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_options.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/types.h
include/ceres/version.h
internal/ceres/array_utils.h
internal/ceres/blas.h
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.h
internal/ceres/block_random_access_sparse_matrix.h
internal/ceres/block_sparse_matrix.h
internal/ceres/block_structure.h
internal/ceres/callbacks.h
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.h
internal/ceres/collections_port.h
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.h
internal/ceres/covariance_impl.h
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.h
internal/ceres/gradient_checking_cost_function.h
internal/ceres/gradient_problem_evaluator.h
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/householder_vector.h
internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/is_close.h
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.h
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.h
internal/ceres/line_search_preprocessor.h
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.h
internal/ceres/mutex.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/polynomial.h
internal/ceres/preconditioner.h
internal/ceres/preprocessor.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
internal/ceres/random.h
internal/ceres/reorder_program.h
internal/ceres/residual_block.h
internal/ceres/residual_block_utils.h
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/small_blas.h
internal/ceres/solver_utils.h
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.h
internal/ceres/suitesparse.h
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_preprocessor.h
internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/wall_time.h
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/evaluation_callback.h
include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/householder_vector.h
include/ceres/internal/integer_sequence_algorithm.h
include/ceres/internal/line_parameterization.h
include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_options.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/tiny_solver_autodiff_function.h
include/ceres/tiny_solver_cost_function_adapter.h
include/ceres/tiny_solver.h
include/ceres/types.h
include/ceres/version.h
internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.h
internal/ceres/blas.h
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.h
internal/ceres/block_random_access_sparse_matrix.h
internal/ceres/block_sparse_matrix.h
internal/ceres/block_structure.h
internal/ceres/callbacks.h
internal/ceres/canonical_views_clustering.h
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.h
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/concurrent_queue.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.h
internal/ceres/covariance_impl.h
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/dynamic_sparse_normal_cholesky_solver.h
internal/ceres/eigensparse.h
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.h
internal/ceres/float_cxsparse.h
internal/ceres/float_suitesparse.h
internal/ceres/function_sample.h
internal/ceres/gradient_checking_cost_function.h
internal/ceres/gradient_problem_evaluator.h
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/implicit_schur_complement.h
internal/ceres/inner_product_computer.h
internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.h
internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.h
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.h
internal/ceres/line_search_preprocessor.h
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.h
internal/ceres/pair_hash.h
internal/ceres/parallel_for.h
internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/polynomial.h
internal/ceres/preconditioner.h
internal/ceres/preprocessor.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
internal/ceres/random.h
internal/ceres/reorder_program.h
internal/ceres/residual_block.h
internal/ceres/residual_block_utils.h
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/schur_templates.h
internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/single_linkage_clustering.h
internal/ceres/small_blas_generic.h
internal/ceres/small_blas.h
internal/ceres/solver_utils.h
internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.h
internal/ceres/subset_preconditioner.h
internal/ceres/suitesparse.h
internal/ceres/thread_pool.h
internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_preprocessor.h
internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.h
internal/ceres/wall_time.h
)
set(LIB
@@ -263,44 +319,48 @@ set(LIB
if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
list(APPEND SRC
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
internal/ceres/generated/schur_eliminator_2_2_2.cc
internal/ceres/generated/schur_eliminator_2_2_3.cc
internal/ceres/generated/schur_eliminator_2_2_4.cc
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
internal/ceres/generated/schur_eliminator_2_2_2.cc
internal/ceres/generated/schur_eliminator_2_2_3.cc
internal/ceres/generated/schur_eliminator_2_2_4.cc
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
)
else()
add_definitions(-DCERES_RESTRICT_SCHUR_SPECIALIZATION)
@@ -315,13 +375,9 @@ add_definitions(
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
-DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
-DCERES_USE_CXX_THREADS
)
if(WITH_OPENMP)
add_definitions(
-DCERES_USE_OPENMP
)
endif()
blender_add_lib(extern_ceres "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

1095
extern/ceres/ChangeLog vendored

File diff suppressed because it is too large Load Diff

View File

@@ -9,7 +9,6 @@ fi
repo="https://ceres-solver.googlesource.com/ceres-solver"
branch="master"
#tag="1.4.0"
tag=""
tmp=`mktemp -d`
checkout="$tmp/ceres"
@@ -157,14 +156,10 @@ add_definitions(
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
-DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
-DCERES_USE_CXX_THREADS
)
if(WITH_OPENMP)
add_definitions(
-DCERES_USE_OPENMP
)
endif()
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}" "\${LIB}")
EOF

View File

@@ -1,29 +1,37 @@
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/fpclassify.h
include/ceres/evaluation_callback.h
include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/macros.h
include/ceres/internal/manual_constructor.h
include/ceres/internal/householder_vector.h
include/ceres/internal/integer_sequence_algorithm.h
include/ceres/internal/line_parameterization.h
include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/scoped_ptr.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
@@ -37,8 +45,13 @@ include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/tiny_solver_autodiff_function.h
include/ceres/tiny_solver_cost_function_adapter.h
include/ceres/tiny_solver.h
include/ceres/types.h
include/ceres/version.h
internal/ceres/accelerate_sparse.cc
internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.cc
internal/ceres/array_utils.h
internal/ceres/blas.cc
@@ -63,21 +76,26 @@ internal/ceres/block_structure.cc
internal/ceres/block_structure.h
internal/ceres/callbacks.cc
internal/ceres/callbacks.h
internal/ceres/canonical_views_clustering.cc
internal/ceres/canonical_views_clustering.h
internal/ceres/c_api.cc
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.cc
internal/ceres/cgnr_solver.h
internal/ceres/collections_port.h
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/concurrent_queue.h
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/context.cc
internal/ceres/context_impl.cc
internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.cc
@@ -85,6 +103,7 @@ internal/ceres/corrector.h
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/covariance_impl.h
internal/ceres/cxsparse.cc
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.cc
@@ -102,11 +121,21 @@ internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
internal/ceres/dynamic_sparse_normal_cholesky_solver.h
internal/ceres/eigensparse.cc
internal/ceres/eigensparse.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.cc
internal/ceres/file.h
internal/ceres/float_cxsparse.cc
internal/ceres/float_cxsparse.h
internal/ceres/float_suitesparse.cc
internal/ceres/float_suitesparse.h
internal/ceres/function_sample.cc
internal/ceres/function_sample.h
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -118,10 +147,12 @@ internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -138,17 +169,18 @@ internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/generate_eliminator_specialization.py
internal/ceres/generate_partitioned_matrix_view_specializations.py
internal/ceres/generate_template_specializations.py
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
@@ -157,12 +189,15 @@ internal/ceres/gradient_problem_evaluator.h
internal/ceres/gradient_problem_solver.cc
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/householder_vector.h
internal/ceres/implicit_schur_complement.cc
internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/inner_product_computer.cc
internal/ceres/inner_product_computer.h
internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.cc
internal/ceres/is_close.h
internal/ceres/iterative_refiner.cc
internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.cc
@@ -190,14 +225,21 @@ internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.cc
internal/ceres/minimizer.h
internal/ceres/mutex.h
internal/ceres/normal_prior.cc
internal/ceres/pair_hash.h
internal/ceres/parallel_for_cxx.cc
internal/ceres/parallel_for.h
internal/ceres/parallel_for_nothreads.cc
internal/ceres/parallel_for_openmp.cc
internal/ceres/parallel_utils.cc
internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.cc
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/partitioned_matrix_view_template.py
internal/ceres/polynomial.cc
internal/ceres/polynomial.h
internal/ceres/preconditioner.cc
@@ -222,14 +264,23 @@ internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.cc
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_eliminator_template.py
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/schur_templates.cc
internal/ceres/schur_templates.h
internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/single_linkage_clustering.cc
internal/ceres/single_linkage_clustering.h
internal/ceres/small_blas_generic.h
internal/ceres/small_blas.h
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/solver_utils.h
internal/ceres/sparse_cholesky.cc
internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
@@ -239,7 +290,14 @@ internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
internal/ceres/subset_preconditioner.cc
internal/ceres/subset_preconditioner.h
internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
internal/ceres/thread_pool.cc
internal/ceres/thread_pool.h
internal/ceres/thread_token_provider.cc
internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.cc
@@ -251,7 +309,10 @@ internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.cc
internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.cc
internal/ceres/visibility.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h
config/ceres/internal/config.h

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
//
// Create CostFunctions as needed by the least squares framework, with
// Jacobians computed via automatic differentiation. For more
// information on automatic differentation, see the wikipedia article
// information on automatic differentiation, see the wikipedia article
// at http://en.wikipedia.org/wiki/Automatic_differentiation
//
// To get an auto differentiated cost function, you must define a class with a
@@ -54,7 +54,7 @@
// for a series of measurements, where there is an instance of the cost function
// for each measurement k.
//
// The actual cost added to the total problem is e^2, or (k - x'k)^2; however,
// The actual cost added to the total problem is e^2, or (k - x'y)^2; however,
// the squaring is implicitly done by the optimization framework.
//
// To write an auto-differentiable cost function for the above model, first
@@ -90,7 +90,7 @@
// Dimension of x ---------------+ |
// Dimension of y ------------------+
//
// In this example, there is usually an instance for each measumerent of k.
// In this example, there is usually an instance for each measurement of k.
//
// In the instantiation above, the template parameters following
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a
@@ -110,12 +110,8 @@
// Dimension of x ------------------------------------+ |
// Dimension of y ---------------------------------------+
//
// The framework can currently accommodate cost functions of up to 10
// independent variables, and there is no limit on the dimensionality
// of each of them.
//
// WARNING #1: Since the functor will get instantiated with different types for
// T, you must to convert from other numeric types to T before mixing
// T, you must convert from other numeric types to T before mixing
// computations with other variables of type T. In the example above, this is
// seen where instead of using k_ directly, k_ is wrapped with T(k_).
//
@@ -129,8 +125,9 @@
#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
#include <memory>
#include "ceres/internal/autodiff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -138,7 +135,7 @@
namespace ceres {
// A cost function which computes the derivative of the cost with respect to
// the parameters (a.k.a. the jacobian) using an autodifferentiation framework.
// the parameters (a.k.a. the jacobian) using an auto differentiation framework.
// The first template argument is the functor object, described in the header
// comment. The second argument is the dimension of the residual (or
// ceres::DYNAMIC to indicate it will be set at runtime), and subsequent
@@ -153,27 +150,15 @@ namespace ceres {
// of residuals for a single autodiff cost function at runtime.
template <typename CostFunctor,
int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
int N0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
int N2 = 0, // Number of parameters in block 2.
int N3 = 0, // Number of parameters in block 3.
int N4 = 0, // Number of parameters in block 4.
int N5 = 0, // Number of parameters in block 5.
int N6 = 0, // Number of parameters in block 6.
int N7 = 0, // Number of parameters in block 7.
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
int... Ns> // Number of parameters in each parameter block.
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
public:
// Takes ownership of functor. Uses the template-provided value for the
// number of residuals ("kNumResiduals").
explicit AutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {
CHECK_NE(kNumResiduals, DYNAMIC)
<< "Can't run the fixed-size constructor if the "
<< "number of residuals is set to ceres::DYNAMIC.";
explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) {
static_assert(kNumResiduals != DYNAMIC,
"Can't run the fixed-size constructor if the number of "
"residuals is set to ceres::DYNAMIC.");
}
// Takes ownership of functor. Ignores the template-provided
@@ -183,13 +168,10 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
// numbers of residuals at runtime.
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
CHECK_EQ(kNumResiduals, DYNAMIC)
<< "Can't run the dynamic-size constructor if the "
<< "number of residuals is not ceres::DYNAMIC.";
SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>
::set_num_residuals(num_residuals);
static_assert(kNumResiduals == DYNAMIC,
"Can't run the dynamic-size constructor if the number of "
"residuals is not ceres::DYNAMIC.");
SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
virtual ~AutoDiffCostFunction() {}
@@ -197,29 +179,28 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
// Implementation details follow; clients of the autodiff cost function should
// not have to examine below here.
//
// To handle varardic cost functions, some template magic is needed. It's
// To handle variadic cost functions, some template magic is needed. It's
// mostly hidden inside autodiff.h.
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override {
using ParameterDims =
typename SizedCostFunction<kNumResiduals, Ns...>::ParameterDims;
if (!jacobians) {
return internal::VariadicEvaluate<
CostFunctor, double, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
::Call(*functor_, parameters, residuals);
return internal::VariadicEvaluate<ParameterDims>(
*functor_, parameters, residuals);
}
return internal::AutoDiff<CostFunctor, double,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
*functor_,
parameters,
SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>::num_residuals(),
residuals,
jacobians);
}
return internal::AutoDifferentiate<kNumResiduals, ParameterDims>(
*functor_,
parameters,
SizedCostFunction<kNumResiduals, Ns...>::num_residuals(),
residuals,
jacobians);
};
private:
internal::scoped_ptr<CostFunctor> functor_;
std::unique_ptr<CostFunctor> functor_;
};
} // namespace ceres

View File

@@ -0,0 +1,151 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_
#define CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_
#include <memory>
#include "ceres/first_order_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/jet.h"
#include "ceres/types.h"
namespace ceres {
// Create FirstOrderFunctions as needed by the GradientProblem
// framework, with gradients computed via automatic
// differentiation. For more information on automatic differentiation,
// see the wikipedia article at
// http://en.wikipedia.org/wiki/Automatic_differentiation
//
// To get an auto differentiated function, you must define a class
// with a templated operator() (a functor) that computes the cost
// function in terms of the template parameter T. The autodiff
// framework substitutes appropriate "jet" objects for T in order to
// compute the derivative when necessary, but this is hidden, and you
// should write the function as if T were a scalar type (e.g. a
// double-precision floating point number).
//
// The function must write the computed value in the last argument
// (the only non-const one) and return true to indicate
// success.
//
// For example, consider a scalar error e = x'y - a, where both x and y are
// two-dimensional column vector parameters, the prime sign indicates
// transposition, and a is a constant.
//
// To write an auto-differentiable FirstOrderFunction for the above model, first
// define the object
//
// class QuadraticCostFunctor {
// public:
// explicit QuadraticCostFunctor(double a) : a_(a) {}
// template <typename T>
// bool operator()(const T* const xy, T* cost) const {
// const T* const x = xy;
// const T* const y = xy + 2;
// *cost = x[0] * y[0] + x[1] * y[1] - T(a_);
// return true;
// }
//
// private:
// double a_;
// };
//
// Note that in the declaration of operator() the input parameters xy come
// first, and are passed as const pointers to arrays of T. The
// output is the last parameter.
//
// Then given this class definition, the auto differentiated FirstOrderFunction
// for it can be constructed as follows.
//
// FirstOrderFunction* function =
// new AutoDiffFirstOrderFunction<QuadraticCostFunctor, 4>(
// new QuadraticCostFunctor(1.0)));
//
// In the instantiation above, the template parameters following
// "QuadraticCostFunctor", "4", describe the functor as computing a
// 1-dimensional output from a four dimensional vector.
//
// WARNING: Since the functor will get instantiated with different types for
// T, you must convert from other numeric types to T before mixing
// computations with other variables of type T. In the example above, this is
// seen where instead of using a_ directly, a_ is wrapped with T(a_).
template <typename FirstOrderFunctor, int kNumParameters>
class AutoDiffFirstOrderFunction : public FirstOrderFunction {
public:
// Takes ownership of functor.
explicit AutoDiffFirstOrderFunction(FirstOrderFunctor* functor)
: functor_(functor) {
static_assert(kNumParameters > 0, "kNumParameters must be positive");
}
virtual ~AutoDiffFirstOrderFunction() {}
bool Evaluate(const double* const parameters,
double* cost,
double* gradient) const override {
if (gradient == nullptr) {
return (*functor_)(parameters, cost);
}
typedef Jet<double, kNumParameters> JetT;
internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(kNumParameters);
for (int i = 0; i < kNumParameters; ++i) {
x[i].a = parameters[i];
x[i].v.setZero();
x[i].v[i] = 1.0;
}
JetT output;
output.a = kImpossibleValue;
output.v.setConstant(kImpossibleValue);
if (!(*functor_)(x.data(), &output)) {
return false;
}
*cost = output.a;
VectorRef(gradient, kNumParameters) = output.v;
return true;
}
int NumParameters() const override { return kNumParameters; }
private:
std::unique_ptr<FirstOrderFunctor> functor_;
};
} // namespace ceres
#endif // CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,9 +33,10 @@
#ifndef CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
#define CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
#include "ceres/local_parameterization.h"
#include <memory>
#include "ceres/internal/autodiff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
namespace ceres {
@@ -107,21 +108,20 @@ namespace ceres {
template <typename Functor, int kGlobalSize, int kLocalSize>
class AutoDiffLocalParameterization : public LocalParameterization {
public:
AutoDiffLocalParameterization() :
functor_(new Functor()) {}
AutoDiffLocalParameterization() : functor_(new Functor()) {}
// Takes ownership of functor.
explicit AutoDiffLocalParameterization(Functor* functor) :
functor_(functor) {}
explicit AutoDiffLocalParameterization(Functor* functor)
: functor_(functor) {}
virtual ~AutoDiffLocalParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override {
return (*functor_)(x, delta, x_plus_delta);
}
virtual bool ComputeJacobian(const double* x, double* jacobian) const {
bool ComputeJacobian(const double* x, double* jacobian) const override {
double zero_delta[kLocalSize];
for (int i = 0; i < kLocalSize; ++i) {
zero_delta[i] = 0.0;
@@ -133,20 +133,18 @@ class AutoDiffLocalParameterization : public LocalParameterization {
}
const double* parameter_ptrs[2] = {x, zero_delta};
double* jacobian_ptrs[2] = { NULL, jacobian };
return internal::AutoDiff<Functor, double, kGlobalSize, kLocalSize>
::Differentiate(*functor_,
parameter_ptrs,
kGlobalSize,
x_plus_delta,
jacobian_ptrs);
double* jacobian_ptrs[2] = {NULL, jacobian};
return internal::AutoDifferentiate<
kGlobalSize,
internal::StaticParameterDims<kGlobalSize, kLocalSize>>(
*functor_, parameter_ptrs, kGlobalSize, x_plus_delta, jacobian_ptrs);
}
virtual int GlobalSize() const { return kGlobalSize; }
virtual int LocalSize() const { return kLocalSize; }
int GlobalSize() const override { return kGlobalSize; }
int LocalSize() const override { return kLocalSize; }
private:
internal::scoped_ptr<Functor> functor_;
std::unique_ptr<Functor> functor_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
/* Ceres Solver - A fast non-linear least squares minimizer
* Copyright 2015 Google Inc. All rights reserved.
* Copyright 2019 Google Inc. All rights reserved.
* http://ceres-solver.org/
*
* Redistribution and use in source and binary forms, with or without
@@ -143,4 +143,4 @@ CERES_EXPORT void ceres_solve(ceres_problem_t* problem);
#include "ceres/internal/reenable_warnings.h"
#endif /* CERES_PUBLIC_C_API_H_ */
#endif /* CERES_PUBLIC_C_API_H_ */

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -36,12 +36,18 @@
#include "ceres/autodiff_cost_function.h"
#include "ceres/autodiff_local_parameterization.h"
#include "ceres/conditioned_cost_function.h"
#include "ceres/context.h"
#include "ceres/cost_function.h"
#include "ceres/cost_function_to_functor.h"
#include "ceres/covariance.h"
#include "ceres/crs_matrix.h"
#include "ceres/dynamic_autodiff_cost_function.h"
#include "ceres/dynamic_cost_function.h"
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/evaluation_callback.h"
#include "ceres/gradient_checker.h"
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_solver.h"
#include "ceres/iteration_callback.h"
@@ -49,6 +55,7 @@
#include "ceres/local_parameterization.h"
#include "ceres/loss_function.h"
#include "ceres/numeric_diff_cost_function.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
#include "ceres/sized_cost_function.h"

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,12 +34,12 @@
#ifndef CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
#define CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
#include <memory>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/types.h"
namespace ceres {
@@ -77,17 +77,19 @@ class CERES_EXPORT ConditionedCostFunction : public CostFunction {
// per-residual conditioner. Takes ownership of all of the wrapped cost
// functions, or not, depending on the ownership parameter. Conditioners
// may be NULL, in which case the corresponding residual is not modified.
//
// The conditioners can repeat.
ConditionedCostFunction(CostFunction* wrapped_cost_function,
const std::vector<CostFunction*>& conditioners,
Ownership ownership);
virtual ~ConditionedCostFunction();
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const;
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override;
private:
internal::scoped_ptr<CostFunction> wrapped_cost_function_;
std::unique_ptr<CostFunction> wrapped_cost_function_;
std::vector<CostFunction*> conditioners_;
Ownership ownership_;
};

56
extern/ceres/include/ceres/context.h vendored Normal file
View File

@@ -0,0 +1,56 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#ifndef CERES_PUBLIC_CONTEXT_H_
#define CERES_PUBLIC_CONTEXT_H_
namespace ceres {
// A global context for processing data in Ceres. This provides a mechanism to
// allow Ceres to reuse items that are expensive to create between multiple
// calls; for example, thread pools. The same Context can be used on multiple
// Problems, either serially or in parallel. When using it with multiple
// Problems at the same time, they may end up contending for resources
// (e.g. threads) managed by the Context.
class Context {
public:
Context() {}
Context(const Context&) = delete;
void operator=(const Context&) = delete;
virtual ~Context() {}
// Creates a context object and the caller takes ownership.
static Context* Create();
};
} // namespace ceres
#endif // CERES_PUBLIC_CONTEXT_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -44,18 +44,18 @@
#ifndef CERES_PUBLIC_COST_FUNCTION_H_
#define CERES_PUBLIC_COST_FUNCTION_H_
#include <cstdint>
#include <vector>
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
namespace ceres {
// This class implements the computation of the cost (a.k.a. residual) terms as
// a function of the input (control) variables, and is the interface for users
// to describe their least squares problem to Ceres. In other words, this is the
// modelling layer between users and the Ceres optimizer. The signature of the
// modeling layer between users and the Ceres optimizer. The signature of the
// function (number and sizes of input parameter blocks and number of outputs)
// is stored in parameter_block_sizes_ and num_residuals_ respectively. User
// code inheriting from this class is expected to set these two members with the
@@ -64,6 +64,8 @@ namespace ceres {
class CERES_EXPORT CostFunction {
public:
CostFunction() : num_residuals_(0) {}
CostFunction(const CostFunction&) = delete;
void operator=(const CostFunction&) = delete;
virtual ~CostFunction() {}
@@ -115,29 +117,24 @@ class CERES_EXPORT CostFunction {
double* residuals,
double** jacobians) const = 0;
const std::vector<int32>& parameter_block_sizes() const {
const std::vector<int32_t>& parameter_block_sizes() const {
return parameter_block_sizes_;
}
int num_residuals() const {
return num_residuals_;
}
int num_residuals() const { return num_residuals_; }
protected:
std::vector<int32>* mutable_parameter_block_sizes() {
std::vector<int32_t>* mutable_parameter_block_sizes() {
return &parameter_block_sizes_;
}
void set_num_residuals(int num_residuals) {
num_residuals_ = num_residuals;
}
void set_num_residuals(int num_residuals) { num_residuals_ = num_residuals; }
private:
// Cost function signature metadata: number of inputs & their sizes,
// number of outputs (residuals).
std::vector<int32> parameter_block_sizes_;
std::vector<int32_t> parameter_block_sizes_;
int num_residuals_;
CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
//
// CostFunctionToFunctor is an adapter class that allows users to use
// SizedCostFunction objects in templated functors which are to be used for
// automatic differentiation. This allows the user to seamlessly mix
// automatic differentiation. This allows the user to seamlessly mix
// analytic, numeric and automatic differentiation.
//
// For example, let us assume that
@@ -38,16 +38,15 @@
// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> {
// public:
// IntrinsicProjection(const double* observation);
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const;
// bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const override;
// };
//
// is a cost function that implements the projection of a point in its
// local coordinate system onto its image plane and subtracts it from
// the observed point projection. It can compute its residual and
// either via analytic or numerical differentiation can compute its
// jacobians.
// jacobians either via analytic or numerical differentiation.
//
// Now we would like to compose the action of this CostFunction with
// the action of camera extrinsics, i.e., rotation and
@@ -87,594 +86,83 @@
#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
#include <cstdint>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
template <int kNumResiduals,
int N0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
template <int kNumResiduals, int... Ns>
class CostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit CostFunctionToFunctor(CostFunction* cost_function)
: cost_functor_(cost_function) {
CHECK_NOTNULL(cost_function);
CHECK(cost_function != nullptr);
CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC);
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
const std::vector<int32>& parameter_block_sizes =
const std::vector<int32_t>& parameter_block_sizes =
cost_function->parameter_block_sizes();
const int num_parameter_blocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
(N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
const int num_parameter_blocks = ParameterDims::kNumParameterBlocks;
CHECK_EQ(static_cast<int>(parameter_block_sizes.size()),
num_parameter_blocks);
CHECK_EQ(N0, parameter_block_sizes[0]);
if (parameter_block_sizes.size() > 1) CHECK_EQ(N1, parameter_block_sizes[1]); // NOLINT
if (parameter_block_sizes.size() > 2) CHECK_EQ(N2, parameter_block_sizes[2]); // NOLINT
if (parameter_block_sizes.size() > 3) CHECK_EQ(N3, parameter_block_sizes[3]); // NOLINT
if (parameter_block_sizes.size() > 4) CHECK_EQ(N4, parameter_block_sizes[4]); // NOLINT
if (parameter_block_sizes.size() > 5) CHECK_EQ(N5, parameter_block_sizes[5]); // NOLINT
if (parameter_block_sizes.size() > 6) CHECK_EQ(N6, parameter_block_sizes[6]); // NOLINT
if (parameter_block_sizes.size() > 7) CHECK_EQ(N7, parameter_block_sizes[7]); // NOLINT
if (parameter_block_sizes.size() > 8) CHECK_EQ(N8, parameter_block_sizes[8]); // NOLINT
if (parameter_block_sizes.size() > 9) CHECK_EQ(N9, parameter_block_sizes[9]); // NOLINT
if (parameter_block_sizes.size() == num_parameter_blocks) {
for (int block = 0; block < num_parameter_blocks; ++block) {
CHECK_EQ(ParameterDims::GetDim(block), parameter_block_sizes[block])
<< "Parameter block size missmatch. The specified static parameter "
"block dimension does not match the one from the cost function.";
}
}
CHECK_EQ(accumulate(parameter_block_sizes.begin(),
parameter_block_sizes.end(), 0),
N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9);
CHECK_EQ(accumulate(
parameter_block_sizes.begin(), parameter_block_sizes.end(), 0),
ParameterDims::kNumParameters);
}
bool operator()(const double* x0, double* residuals) const {
CHECK_NE(N0, 0);
CHECK_EQ(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
template <typename T, typename... Ts>
bool operator()(const T* p1, Ts*... ps) const {
// Add one because of residual block.
static_assert(sizeof...(Ts) + 1 == ParameterDims::kNumParameterBlocks + 1,
"Invalid number of parameter blocks specified.");
return cost_functor_(&x0, residuals);
}
auto params = std::make_tuple(p1, ps...);
bool operator()(const double* x0,
const double* x1,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(2);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
return cost_functor_(parameter_blocks.get(), residuals);
}
// Extract residual pointer from params. The residual pointer is the
// last pointer.
constexpr int kResidualIndex = ParameterDims::kNumParameterBlocks;
T* residuals = std::get<kResidualIndex>(params);
bool operator()(const double* x0,
const double* x1,
const double* x2,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(3);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
return cost_functor_(parameter_blocks.get(), residuals);
}
// Extract parameter block pointers from params.
using Indices =
std::make_integer_sequence<int,
ParameterDims::kNumParameterBlocks>;
std::array<const T*, ParameterDims::kNumParameterBlocks> parameter_blocks =
GetParameterPointers<T>(params, Indices());
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(4);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(5);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(6);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(7);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(8);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(9);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
const double* x9,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_NE(N9, 0);
internal::FixedArray<const double*> parameter_blocks(10);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
parameter_blocks[9] = x9;
return cost_functor_(parameter_blocks.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0, JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_EQ(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
return cost_functor_(&x0, residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(2);
jets[0] = x0;
jets[1] = x1;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(3);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(4);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(5);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(6);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(7);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(8);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
const JetT* x8,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(9);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
jets[8] = x8;
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
const JetT* x8,
const JetT* x9,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_NE(N9, 0);
internal::FixedArray<const JetT*> jets(10);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
jets[8] = x8;
jets[9] = x9;
return cost_functor_(jets.get(), residuals);
return cost_functor_(parameter_blocks.data(), residuals);
}
private:
using ParameterDims = internal::StaticParameterDims<Ns...>;
template <typename T, typename Tuple, int... Indices>
static std::array<const T*, ParameterDims::kNumParameterBlocks>
GetParameterPointers(const Tuple& paramPointers,
std::integer_sequence<int, Indices...>) {
return std::array<const T*, ParameterDims::kNumParameterBlocks>{
{std::get<Indices>(paramPointers)...}};
}
DynamicCostFunctionToFunctor cost_functor_;
};

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,13 @@
#ifndef CERES_PUBLIC_COVARIANCE_H_
#define CERES_PUBLIC_COVARIANCE_H_
#include <memory>
#include <utility>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
@@ -60,7 +61,7 @@ class CovarianceImpl;
// Background
// ==========
// One way to assess the quality of the solution returned by a
// non-linear least squares solve is to analyze the covariance of the
// non-linear least squares solver is to analyze the covariance of the
// solution.
//
// Let us consider the non-linear regression problem
@@ -158,7 +159,7 @@ class CovarianceImpl;
// Gauge Invariance
// ----------------
// In structure from motion (3D reconstruction) problems, the
// reconstruction is ambiguous upto a similarity transform. This is
// reconstruction is ambiguous up to a similarity transform. This is
// known as a Gauge Ambiguity. Handling Gauges correctly requires the
// use of SVD or custom inversion algorithms. For small problems the
// user can use the dense algorithm. For more details see
@@ -183,7 +184,7 @@ class CovarianceImpl;
// Covariance::Options options;
// Covariance covariance(options);
//
// std::vector<std::pair<const double*, const double*> > covariance_blocks;
// std::vector<std::pair<const double*, const double*>> covariance_blocks;
// covariance_blocks.push_back(make_pair(x, x));
// covariance_blocks.push_back(make_pair(y, y));
// covariance_blocks.push_back(make_pair(x, y));
@@ -200,19 +201,19 @@ class CovarianceImpl;
class CERES_EXPORT Covariance {
public:
struct CERES_EXPORT Options {
Options()
#ifndef CERES_NO_SUITESPARSE
: algorithm_type(SUITE_SPARSE_QR),
// Sparse linear algebra library to use when a sparse matrix
// factorization is being used to compute the covariance matrix.
//
// Currently this only applies to SPARSE_QR.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
#if !defined(CERES_NO_SUITESPARSE)
SUITE_SPARSE;
#else
: algorithm_type(EIGEN_SPARSE_QR),
// Eigen's QR factorization is always available.
EIGEN_SPARSE;
#endif
min_reciprocal_condition_number(1e-14),
null_space_rank(0),
num_threads(1),
apply_loss_function(true) {
}
// Ceres supports three different algorithms for covariance
// Ceres supports two different algorithms for covariance
// estimation, which represent different tradeoffs in speed,
// accuracy and reliability.
//
@@ -229,23 +230,20 @@ class CERES_EXPORT Covariance {
// for small to moderate sized problems. It can handle
// full-rank as well as rank deficient Jacobians.
//
// 2. EIGEN_SPARSE_QR uses the sparse QR factorization algorithm
// in Eigen to compute the decomposition
// 2. SPARSE_QR uses the sparse QR factorization algorithm
// to compute the decomposition
//
// Q * R = J
//
// [J'J]^-1 = [R*R']^-1
//
// It is a moderately fast algorithm for sparse matrices.
//
// 3. SUITE_SPARSE_QR uses the SuiteSparseQR sparse QR
// factorization algorithm. It uses dense linear algebra and is
// multi threaded, so for large sparse sparse matrices it is
// significantly faster than EIGEN_SPARSE_QR.
//
// Neither EIGEN_SPARSE_QR not SUITE_SPARSE_QR are capable of
// computing the covariance if the Jacobian is rank deficient.
CovarianceAlgorithmType algorithm_type;
// SPARSE_QR is not capable of computing the covariance if the
// Jacobian is rank deficient. Depending on the value of
// Covariance::Options::sparse_linear_algebra_library_type, either
// Eigen's Sparse QR factorization algorithm will be used or
// SuiteSparse's high performance SuiteSparseQR algorithm will be
// used.
CovarianceAlgorithmType algorithm_type = SPARSE_QR;
// If the Jacobian matrix is near singular, then inverting J'J
// will result in unreliable results, e.g, if
@@ -270,7 +268,7 @@ class CERES_EXPORT Covariance {
// where min_sigma and max_sigma are the minimum and maximum
// singular values of J respectively.
//
// 2. SUITE_SPARSE_QR and EIGEN_SPARSE_QR
// 2. SPARSE_QR
//
// rank(J) < num_col(J)
//
@@ -278,7 +276,7 @@ class CERES_EXPORT Covariance {
// sparse QR factorization algorithm. It is a fairly reliable
// indication of rank deficiency.
//
double min_reciprocal_condition_number;
double min_reciprocal_condition_number = 1e-14;
// When using DENSE_SVD, the user has more control in dealing with
// singular and near singular covariance matrices.
@@ -313,9 +311,9 @@ class CERES_EXPORT Covariance {
//
// This option has no effect on the SUITE_SPARSE_QR and
// EIGEN_SPARSE_QR algorithms.
int null_space_rank;
int null_space_rank = 0;
int num_threads;
int num_threads = 1;
// Even though the residual blocks in the problem may contain loss
// functions, setting apply_loss_function to false will turn off
@@ -323,7 +321,7 @@ class CERES_EXPORT Covariance {
// function and in turn its effect on the covariance.
//
// TODO(sameergaarwal): Expand this based on Jim's experiments.
bool apply_loss_function;
bool apply_loss_function = true;
};
explicit Covariance(const Options& options);
@@ -352,10 +350,9 @@ class CERES_EXPORT Covariance {
// covariance computation. Please see the documentation for
// Covariance::Options for more on the conditions under which this
// function returns false.
bool Compute(
const std::vector<std::pair<const double*,
const double*> >& covariance_blocks,
Problem* problem);
bool Compute(const std::vector<std::pair<const double*, const double*>>&
covariance_blocks,
Problem* problem);
// Compute a part of the covariance matrix.
//
@@ -428,8 +425,8 @@ class CERES_EXPORT Covariance {
// a square matrix whose row and column count is equal to the sum of
// the sizes of the individual parameter blocks. The covariance
// matrix will be a row-major matrix.
bool GetCovarianceMatrix(const std::vector<const double *> &parameter_blocks,
double *covariance_matrix);
bool GetCovarianceMatrix(const std::vector<const double*>& parameter_blocks,
double* covariance_matrix) const;
// Return the covariance matrix corresponding to parameter_blocks
// in the tangent space if a local parameterization is associated
@@ -448,10 +445,10 @@ class CERES_EXPORT Covariance {
// blocks. The covariance matrix will be a row-major matrix.
bool GetCovarianceMatrixInTangentSpace(
const std::vector<const double*>& parameter_blocks,
double* covariance_matrix);
double* covariance_matrix) const;
private:
internal::scoped_ptr<internal::CovarianceImpl> impl_;
std::unique_ptr<internal::CovarianceImpl> impl_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,8 +32,9 @@
#define CERES_PUBLIC_CRS_MATRIX_H_
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
namespace ceres {

View File

@@ -0,0 +1,436 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_CUBIC_INTERPOLATION_H_
#define CERES_PUBLIC_CUBIC_INTERPOLATION_H_
#include "Eigen/Core"
#include "ceres/internal/port.h"
#include "glog/logging.h"
namespace ceres {
// Given samples from a function sampled at four equally spaced points,
//
// p0 = f(-1)
// p1 = f(0)
// p2 = f(1)
// p3 = f(2)
//
// Evaluate the cubic Hermite spline (also known as the Catmull-Rom
// spline) at a point x that lies in the interval [0, 1].
//
// This is also the interpolation kernel (for the case of a = 0.5) as
// proposed by R. Keys, in:
//
// "Cubic convolution interpolation for digital image processing".
// IEEE Transactions on Acoustics, Speech, and Signal Processing
// 29 (6): 1153-1160.
//
// For more details see
//
// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
// http://en.wikipedia.org/wiki/Bicubic_interpolation
//
// f if not NULL will contain the interpolated function values.
// dfdx if not NULL will contain the interpolated derivative values.
template <int kDataDimension>
void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0,
                        const Eigen::Matrix<double, kDataDimension, 1>& p1,
                        const Eigen::Matrix<double, kDataDimension, 1>& p2,
                        const Eigen::Matrix<double, kDataDimension, 1>& p3,
                        const double x,
                        double* f,
                        double* dfdx) {
  typedef Eigen::Matrix<double, kDataDimension, 1> VType;
  // Coefficients of the interpolating cubic f(x) = ax^3 + bx^2 + cx + d.
  // They are the Catmull-Rom coefficients: f(0) = p1, f(1) = p2, and the
  // endpoint slopes are the centered differences f'(0) = (p2 - p0) / 2
  // (the expression for c below) and f'(1) = (p3 - p1) / 2.
  const VType a = 0.5 * (-p0 + 3.0 * p1 - 3.0 * p2 + p3);
  const VType b = 0.5 * (2.0 * p0 - 5.0 * p1 + 4.0 * p2 - p3);
  const VType c = 0.5 * (-p0 + p2);
  const VType d = p1;
  // Use Horner's rule to evaluate the function value and its
  // derivative.
  // f = ax^3 + bx^2 + cx + d
  if (f != NULL) {
    // Map writes the kDataDimension interpolated values directly into the
    // caller-supplied buffer without an intermediate copy.
    Eigen::Map<VType>(f, kDataDimension) = d + x * (c + x * (b + x * a));
  }
  // dfdx = 3ax^2 + 2bx + c
  if (dfdx != NULL) {
    Eigen::Map<VType>(dfdx, kDataDimension) = c + x * (2.0 * b + 3.0 * a * x);
  }
}
// Given as input an infinite one dimensional grid, which provides the
// following interface.
//
// class Grid {
// public:
// enum { DATA_DIMENSION = 2; };
// void GetValue(int n, double* f) const;
// };
//
// Here, GetValue gives the value of a function f (possibly vector
// valued) for any integer n.
//
// The enum DATA_DIMENSION indicates the dimensionality of the
// function being interpolated. For example if you are interpolating
// rotations in axis-angle format over time, then DATA_DIMENSION = 3.
//
// CubicInterpolator uses cubic Hermite splines to produce a smooth
// approximation to it that can be used to evaluate the f(x) and f'(x)
// at any point on the real number line.
//
// For more details on cubic interpolation see
//
// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
//
// Example usage:
//
// const double data[] = {1.0, 2.0, 5.0, 6.0};
// Grid1D<double, 1> grid(data, 0, 4);
// CubicInterpolator<Grid1D<double, 1>> interpolator(grid);
// double f, dfdx;
// interpolator.Evaluator(1.5, &f, &dfdx);
// One dimensional cubic Hermite interpolation over an infinite grid of
// samples provided by Grid (see the interface description above).
template <typename Grid>
class CubicInterpolator {
 public:
  explicit CubicInterpolator(const Grid& grid) : grid_(grid) {
    // The + casts the enum into an int before doing the
    // comparison. It is needed to prevent
    // "-Wunnamed-type-template-args" related errors.
    CHECK_GE(+Grid::DATA_DIMENSION, 1);
  }
  // Evaluate f (and f', if dfdx is non-NULL) at x. The spline segment is
  // the unit interval [n, n + 1], built from the four surrounding samples
  // f(n - 1), f(n), f(n + 1), f(n + 2).
  void Evaluate(double x, double* f, double* dfdx) const {
    // n = floor(x), so x - n in [0, 1) is the local spline coordinate.
    const int n = std::floor(x);
    Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> p0, p1, p2, p3;
    grid_.GetValue(n - 1, p0.data());
    grid_.GetValue(n, p1.data());
    grid_.GetValue(n + 1, p2.data());
    grid_.GetValue(n + 2, p3.data());
    CubicHermiteSpline<Grid::DATA_DIMENSION>(p0, p1, p2, p3, x - n, f, dfdx);
  }
  // The following two Evaluate overloads are needed for interfacing
  // with automatic differentiation. The first is for when a scalar
  // evaluation is done, and the second one is for when Jets are used.
  void Evaluate(const double& x, double* f) const { Evaluate(x, f, NULL); }
  template <typename JetT>
  void Evaluate(const JetT& x, JetT* f) const {
    double fx[Grid::DATA_DIMENSION], dfdx[Grid::DATA_DIMENSION];
    Evaluate(x.a, fx, dfdx);
    // Propagate derivatives through the interpolant via the chain rule:
    // d f_i / d params = f_i'(x) * dx / d params.
    for (int i = 0; i < Grid::DATA_DIMENSION; ++i) {
      f[i].a = fx[i];
      f[i].v = dfdx[i] * x.v;
    }
  }
 private:
  // Non-owning reference; the grid must outlive the interpolator.
  const Grid& grid_;
};
// An object that implements an infinite one dimensional grid needed
// by the CubicInterpolator where the source of the function values is
// an array of type T on the interval
//
// [begin, ..., end - 1]
//
// Since the input array is finite and the grid is infinite, values
// outside this interval need to be computed. Grid1D uses the value
// from the nearest edge.
//
// The function being provided can be vector valued, in which case
// kDataDimension > 1. The dimensional slices of the function maybe
// interleaved, or they maybe stacked, i.e, if the function has
// kDataDimension = 2, if kInterleaved = true, then it is stored as
//
// f01, f02, f11, f12 ....
//
// and if kInterleaved = false, then it is stored as
//
// f01, f11, .. fn1, f02, f12, .. , fn2
//
template <typename T, int kDataDimension = 1, bool kInterleaved = true>
struct Grid1D {
 public:
  enum { DATA_DIMENSION = kDataDimension };
  // data is a non-owning pointer to the samples on [begin, end); it must
  // remain valid for the lifetime of the Grid1D.
  Grid1D(const T* data, const int begin, const int end)
      : data_(data), begin_(begin), end_(end), num_values_(end - begin) {
    CHECK_LT(begin, end);
  }
  EIGEN_STRONG_INLINE void GetValue(const int n, double* f) const {
    // Clamp n to [begin_, end_ - 1] so reads outside the finite data
    // replicate the nearest edge value, then shift to a zero-based index.
    const int idx = std::min(std::max(begin_, n), end_ - 1) - begin_;
    if (kInterleaved) {
      // Interleaved layout: all kDataDimension components of one sample
      // are stored contiguously.
      for (int i = 0; i < kDataDimension; ++i) {
        f[i] = static_cast<double>(data_[kDataDimension * idx + i]);
      }
    } else {
      // Stacked (planar) layout: component i occupies its own run of
      // num_values_ entries.
      for (int i = 0; i < kDataDimension; ++i) {
        f[i] = static_cast<double>(data_[i * num_values_ + idx]);
      }
    }
  }
 private:
  const T* data_;
  const int begin_;
  const int end_;
  const int num_values_;
};
// Given as input an infinite two dimensional grid like object, which
// provides the following interface:
//
// struct Grid {
// enum { DATA_DIMENSION = 1 };
// void GetValue(int row, int col, double* f) const;
// };
//
// Where, GetValue gives us the value of a function f (possibly vector
// valued) for any pairs of integers (row, col), and the enum
// DATA_DIMENSION indicates the dimensionality of the function being
// interpolated. For example if you are interpolating a color image
// with three channels (Red, Green & Blue), then DATA_DIMENSION = 3.
//
// BiCubicInterpolator uses the cubic convolution interpolation
// algorithm of R. Keys, to produce a smooth approximation to it that
// can be used to evaluate the f(r,c), df(r, c)/dr and df(r,c)/dc at
// any point in the real plane.
//
// For more details on the algorithm used here see:
//
// "Cubic convolution interpolation for digital image processing".
// Robert G. Keys, IEEE Trans. on Acoustics, Speech, and Signal
// Processing 29 (6): 1153-1160, 1981.
//
// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
// http://en.wikipedia.org/wiki/Bicubic_interpolation
//
// Example usage:
//
// const double data[] = {1.0, 3.0, -1.0, 4.0,
// 3.6, 2.1, 4.2, 2.0,
// 2.0, 1.0, 3.1, 5.2};
// Grid2D<double, 1> grid(data, 3, 4);
// BiCubicInterpolator<Grid2D<double, 1>> interpolator(grid);
// double f, dfdr, dfdc;
// interpolator.Evaluate(1.2, 2.5, &f, &dfdr, &dfdc);
// Two dimensional bicubic interpolation over an infinite grid of samples
// provided by Grid (see the interface description above).
template <typename Grid>
class BiCubicInterpolator {
 public:
  explicit BiCubicInterpolator(const Grid& grid) : grid_(grid) {
    // The + casts the enum into an int before doing the
    // comparison. It is needed to prevent
    // "-Wunnamed-type-template-args" related errors.
    CHECK_GE(+Grid::DATA_DIMENSION, 1);
  }
  // Evaluate the interpolated function value and/or its
  // derivative. Uses the nearest point on the grid boundary if r or
  // c is out of bounds.
  void Evaluate(
      double r, double c, double* f, double* dfdr, double* dfdc) const {
    // BiCubic interpolation requires 16 values around the point being
    // evaluated.  We will use pij, to indicate the elements of the
    // 4x4 grid of values.
    //
    //          col
    //      p00 p01 p02 p03
    // row  p10 p11 p12 p13
    //      p20 p21 p22 p23
    //      p30 p31 p32 p33
    //
    // The point (r,c) being evaluated is assumed to lie in the square
    // defined by p11, p12, p22 and p21.
    const int row = std::floor(r);
    const int col = std::floor(c);
    Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> p0, p1, p2, p3;
    // Interpolate along each of the four rows, evaluating the function
    // value and the horizontal derivative in each row.
    Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> f0, f1, f2, f3;
    Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> df0dc, df1dc, df2dc, df3dc;
    // Row (row - 1): function value f0 and column derivative df0dc.
    grid_.GetValue(row - 1, col - 1, p0.data());
    grid_.GetValue(row - 1, col, p1.data());
    grid_.GetValue(row - 1, col + 1, p2.data());
    grid_.GetValue(row - 1, col + 2, p3.data());
    CubicHermiteSpline<Grid::DATA_DIMENSION>(
        p0, p1, p2, p3, c - col, f0.data(), df0dc.data());
    // Row (row): f1 and df1dc.
    grid_.GetValue(row, col - 1, p0.data());
    grid_.GetValue(row, col, p1.data());
    grid_.GetValue(row, col + 1, p2.data());
    grid_.GetValue(row, col + 2, p3.data());
    CubicHermiteSpline<Grid::DATA_DIMENSION>(
        p0, p1, p2, p3, c - col, f1.data(), df1dc.data());
    // Row (row + 1): f2 and df2dc.
    grid_.GetValue(row + 1, col - 1, p0.data());
    grid_.GetValue(row + 1, col, p1.data());
    grid_.GetValue(row + 1, col + 1, p2.data());
    grid_.GetValue(row + 1, col + 2, p3.data());
    CubicHermiteSpline<Grid::DATA_DIMENSION>(
        p0, p1, p2, p3, c - col, f2.data(), df2dc.data());
    // Row (row + 2): f3 and df3dc.
    grid_.GetValue(row + 2, col - 1, p0.data());
    grid_.GetValue(row + 2, col, p1.data());
    grid_.GetValue(row + 2, col + 1, p2.data());
    grid_.GetValue(row + 2, col + 2, p3.data());
    CubicHermiteSpline<Grid::DATA_DIMENSION>(
        p0, p1, p2, p3, c - col, f3.data(), df3dc.data());
    // Interpolate vertically the interpolated value from each row and
    // compute the derivative along the columns.
    CubicHermiteSpline<Grid::DATA_DIMENSION>(f0, f1, f2, f3, r - row, f, dfdr);
    if (dfdc != NULL) {
      // Interpolate vertically the derivative along the columns.
      CubicHermiteSpline<Grid::DATA_DIMENSION>(
          df0dc, df1dc, df2dc, df3dc, r - row, dfdc, NULL);
    }
  }
  // The following two Evaluate overloads are needed for interfacing
  // with automatic differentiation. The first is for when a scalar
  // evaluation is done, and the second one is for when Jets are used.
  void Evaluate(const double& r, const double& c, double* f) const {
    Evaluate(r, c, f, NULL, NULL);
  }
  template <typename JetT>
  void Evaluate(const JetT& r, const JetT& c, JetT* f) const {
    double frc[Grid::DATA_DIMENSION];
    double dfdr[Grid::DATA_DIMENSION];
    double dfdc[Grid::DATA_DIMENSION];
    Evaluate(r.a, c.a, frc, dfdr, dfdc);
    // Chain rule: combine the partial derivatives along rows and columns
    // with the derivative parts of the input Jets.
    for (int i = 0; i < Grid::DATA_DIMENSION; ++i) {
      f[i].a = frc[i];
      f[i].v = dfdr[i] * r.v + dfdc[i] * c.v;
    }
  }
 private:
  // Non-owning reference; the grid must outlive the interpolator.
  const Grid& grid_;
};
// An object that implements an infinite two dimensional grid needed
// by the BiCubicInterpolator where the source of the function values
// is a grid of type T on the grid
//
// [(row_start, col_start), ..., (row_start, col_end - 1)]
// [ ... ]
// [(row_end - 1, col_start), ..., (row_end - 1, col_end - 1)]
//
// Since the input grid is finite and the grid is infinite, values
// outside this interval need to be computed. Grid2D uses the value
// from the nearest edge.
//
// The function being provided can be vector valued, in which case
// kDataDimension > 1. The data maybe stored in row or column major
// format and the various dimensional slices of the function maybe
// interleaved, or they maybe stacked, i.e, if the function has
// kDataDimension = 2, is stored in row-major format and if
// kInterleaved = true, then it is stored as
//
// f001, f002, f011, f012, ...
//
// A commonly occurring example is color images (RGB) where the three
// channels are stored interleaved.
//
// If kInterleaved = false, then it is stored as
//
// f001, f011, ..., fnm1, f002, f012, ...
template <typename T,
          int kDataDimension = 1,
          bool kRowMajor = true,
          bool kInterleaved = true>
struct Grid2D {
 public:
  enum { DATA_DIMENSION = kDataDimension };
  // data is a non-owning pointer to the samples on the rectangle
  // [row_begin, row_end) x [col_begin, col_end); it must remain valid for
  // the lifetime of the Grid2D.
  Grid2D(const T* data,
         const int row_begin,
         const int row_end,
         const int col_begin,
         const int col_end)
      : data_(data),
        row_begin_(row_begin),
        row_end_(row_end),
        col_begin_(col_begin),
        col_end_(col_end),
        num_rows_(row_end - row_begin),
        num_cols_(col_end - col_begin),
        num_values_(num_rows_ * num_cols_) {
    CHECK_GE(kDataDimension, 1);
    CHECK_LT(row_begin, row_end);
    CHECK_LT(col_begin, col_end);
  }
  EIGEN_STRONG_INLINE void GetValue(const int r, const int c, double* f) const {
    // Clamp (r, c) to the finite rectangle so out-of-bounds reads
    // replicate the nearest edge value, then shift to zero-based indices.
    const int row_idx =
        std::min(std::max(row_begin_, r), row_end_ - 1) - row_begin_;
    const int col_idx =
        std::min(std::max(col_begin_, c), col_end_ - 1) - col_begin_;
    // Linear index of the sample for the requested storage order.
    const int n = (kRowMajor) ? num_cols_ * row_idx + col_idx
                              : num_rows_ * col_idx + row_idx;
    if (kInterleaved) {
      // Interleaved layout: all kDataDimension components of one sample
      // are stored contiguously.
      for (int i = 0; i < kDataDimension; ++i) {
        f[i] = static_cast<double>(data_[kDataDimension * n + i]);
      }
    } else {
      // Stacked (planar) layout: component i occupies its own run of
      // num_values_ entries.
      for (int i = 0; i < kDataDimension; ++i) {
        f[i] = static_cast<double>(data_[i * num_values_ + n]);
      }
    }
  }
 private:
  const T* data_;
  const int row_begin_;
  const int row_end_;
  const int col_begin_;
  const int col_end_;
  const int num_rows_;
  const int num_cols_;
  const int num_values_;
};
} // namespace ceres
#endif // CERES_PUBLIC_CUBIC_INTERPOLATOR_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -28,7 +28,22 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
//
#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#include <cmath>
#include <memory>
#include <numeric>
#include <vector>
#include "ceres/dynamic_cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/jet.h"
#include "glog/logging.h"
namespace ceres {
// This autodiff implementation differs from the one found in
// autodiff_cost_function.h by supporting autodiff on cost functions
// with variable numbers of parameters with variable sizes. With the
@@ -43,7 +58,7 @@
// bool operator()(T const* const* parameters, T* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
// }
// };
//
// Since the sizing of the parameters is done at runtime, you must
// also specify the sizes after creating the dynamic autodiff cost
@@ -60,40 +75,17 @@
// default, controlled by the Stride template parameter) with each
// pass. There is a tradeoff with the size of the passes; you may want
// to experiment with the stride.
#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#include <cmath>
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/jet.h"
#include "glog/logging.h"
namespace ceres {
template <typename CostFunctor, int Stride = 4>
class DynamicAutoDiffCostFunction : public CostFunction {
class DynamicAutoDiffCostFunction : public DynamicCostFunction {
public:
explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {}
: functor_(functor) {}
virtual ~DynamicAutoDiffCostFunction() {}
void AddParameterBlock(int size) {
mutable_parameter_block_sizes()->push_back(size);
}
void SetNumResiduals(int num_residuals) {
set_num_residuals(num_residuals);
}
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override {
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicAutoDiffCostFunction::SetNumResiduals() "
<< "before DynamicAutoDiffCostFunction::Evaluate().";
@@ -112,20 +104,23 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// depends on.
//
// To work around this issue, the solution here is to evaluate the
// jacobians in a series of passes, each one computing Stripe *
// jacobians in a series of passes, each one computing Stride *
// num_residuals() derivatives. This is done with small, fixed-size jets.
const int num_parameter_blocks = parameter_block_sizes().size();
const int num_parameters = std::accumulate(parameter_block_sizes().begin(),
parameter_block_sizes().end(),
0);
const int num_parameter_blocks =
static_cast<int>(parameter_block_sizes().size());
const int num_parameters = std::accumulate(
parameter_block_sizes().begin(), parameter_block_sizes().end(), 0);
// Allocate scratch space for the strided evaluation.
std::vector<Jet<double, Stride> > input_jets(num_parameters);
std::vector<Jet<double, Stride> > output_jets(num_residuals());
using JetT = Jet<double, Stride>;
internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> input_jets(
num_parameters);
internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> output_jets(
num_residuals());
// Make the parameter pack that is sent to the functor (reused).
std::vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks,
static_cast<Jet<double, Stride>* >(NULL));
internal::FixedArray<Jet<double, Stride>*> jet_parameters(
num_parameter_blocks, nullptr);
int num_active_parameters = 0;
// To handle constant parameters between non-constant parameter blocks, the
@@ -172,8 +167,8 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// Evaluate all of the strides. Each stride is a chunk of the derivative to
// evaluate, typically some size proportional to the size of the SIMD
// registers of the CPU.
int num_strides = static_cast<int>(ceil(num_active_parameters /
static_cast<float>(Stride)));
int num_strides = static_cast<int>(
ceil(num_active_parameters / static_cast<float>(Stride)));
int current_derivative_section = 0;
int current_derivative_section_cursor = 0;
@@ -183,7 +178,7 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// non-constant #Stride parameters.
const int initial_derivative_section = current_derivative_section;
const int initial_derivative_section_cursor =
current_derivative_section_cursor;
current_derivative_section_cursor;
int active_parameter_count = 0;
parameter_cursor = 0;
@@ -193,9 +188,9 @@ class DynamicAutoDiffCostFunction : public CostFunction {
++j, parameter_cursor++) {
input_jets[parameter_cursor].v.setZero();
if (active_parameter_count < Stride &&
parameter_cursor >= (
start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
parameter_cursor >=
(start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
input_jets[parameter_cursor].v[active_parameter_count] = 1.0;
++active_parameter_count;
@@ -222,9 +217,9 @@ class DynamicAutoDiffCostFunction : public CostFunction {
for (int j = 0; j < parameter_block_sizes()[i];
++j, parameter_cursor++) {
if (active_parameter_count < Stride &&
parameter_cursor >= (
start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
parameter_cursor >=
(start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
for (int k = 0; k < num_residuals(); ++k) {
jacobians[i][k * parameter_block_sizes()[i] + j] =
@@ -252,7 +247,7 @@ class DynamicAutoDiffCostFunction : public CostFunction {
}
private:
internal::scoped_ptr<CostFunctor> functor_;
std::unique_ptr<CostFunctor> functor_;
};
} // namespace ceres

View File

@@ -0,0 +1,56 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
#include "ceres/cost_function.h"
namespace ceres {
// A common base class for DynamicAutoDiffCostFunction and
// DynamicNumericDiffCostFunction which depend on methods that can add
// parameter blocks and set the number of residuals at run time.
class CERES_EXPORT DynamicCostFunction : public CostFunction {
 public:
  ~DynamicCostFunction() {}

  // Appends a parameter block of 'size' entries to the list of blocks
  // this cost function expects to be passed to Evaluate().
  virtual void AddParameterBlock(int size) {
    mutable_parameter_block_sizes()->push_back(size);
  }

  // Sets the number of residuals this cost function produces.
  virtual void SetNumResiduals(int num_residuals) {
    set_num_residuals(num_residuals);
  }
};
} // namespace ceres
#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -28,7 +28,20 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// dgossow@google.com (David Gossow)
//
#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#include <memory>
#include <numeric>
#include <vector>
#include "ceres/dynamic_cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
namespace ceres {
// DynamicCostFunctionToFunctor allows users to use CostFunction
// objects in templated functors which are to be used for automatic
// differentiation. It works similar to CostFunctionToFunctor, with the
@@ -40,9 +53,9 @@
// class IntrinsicProjection : public CostFunction {
// public:
// IntrinsicProjection(const double* observation);
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const;
// bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const override;
// };
//
// is a cost function that implements the projection of a point in its
@@ -87,26 +100,12 @@
// private:
// DynamicCostFunctionToFunctor intrinsic_projection_;
// };
#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
class DynamicCostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit DynamicCostFunctionToFunctor(CostFunction* cost_function)
: cost_function_(cost_function) {
CHECK_NOTNULL(cost_function);
CHECK(cost_function != nullptr);
}
bool operator()(double const* const* parameters, double* residuals) const {
@@ -115,12 +114,13 @@ class DynamicCostFunctionToFunctor {
template <typename JetT>
bool operator()(JetT const* const* inputs, JetT* output) const {
const std::vector<int32>& parameter_block_sizes =
const std::vector<int32_t>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
const int num_parameter_blocks =
static_cast<int>(parameter_block_sizes.size());
const int num_residuals = cost_function_->num_residuals();
const int num_parameters = std::accumulate(parameter_block_sizes.begin(),
parameter_block_sizes.end(), 0);
const int num_parameters = std::accumulate(
parameter_block_sizes.begin(), parameter_block_sizes.end(), 0);
internal::FixedArray<double> parameters(num_parameters);
internal::FixedArray<double*> parameter_blocks(num_parameter_blocks);
@@ -130,8 +130,8 @@ class DynamicCostFunctionToFunctor {
// Build a set of arrays to get the residuals and jacobians from
// the CostFunction wrapped by this functor.
double* parameter_ptr = parameters.get();
double* jacobian_ptr = jacobians.get();
double* parameter_ptr = parameters.data();
double* jacobian_ptr = jacobians.data();
for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_blocks[i] = parameter_ptr;
jacobian_blocks[i] = jacobian_ptr;
@@ -141,9 +141,9 @@ class DynamicCostFunctionToFunctor {
jacobian_ptr += num_residuals * parameter_block_sizes[i];
}
if (!cost_function_->Evaluate(parameter_blocks.get(),
residuals.get(),
jacobian_blocks.get())) {
if (!cost_function_->Evaluate(parameter_blocks.data(),
residuals.data(),
jacobian_blocks.data())) {
return false;
}
@@ -170,7 +170,7 @@ class DynamicCostFunctionToFunctor {
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
const int32 block_size = parameter_block_sizes[j];
const int32_t block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
@@ -182,7 +182,7 @@ class DynamicCostFunctionToFunctor {
}
private:
internal::scoped_ptr<CostFunction> cost_function_;
std::unique_ptr<CostFunction> cost_function_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,24 @@
// sameeragarwal@google.com (Sameer Agarwal)
// thadh@gmail.com (Thad Hughes)
// tbennun@gmail.com (Tal Ben-Nun)
//
#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
#include <cmath>
#include <memory>
#include <numeric>
#include <vector>
#include "ceres/dynamic_cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/numeric_diff_options.h"
#include "glog/logging.h"
namespace ceres {
// This numeric diff implementation differs from the one found in
// numeric_diff_cost_function.h by supporting numeric diff on cost
// functions with variable numbers of parameters with variable
@@ -42,7 +59,9 @@
// numeric diff; the expected interface for the cost functors is:
//
// struct MyCostFunctor {
// bool operator()(double const* const* parameters, double* residuals) const {
// bool operator()(double const*
// const* parameters,
// double* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
// }
@@ -56,34 +75,14 @@
// cost_function.AddParameterBlock(5);
// cost_function.AddParameterBlock(10);
// cost_function.SetNumResiduals(21);
#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
#include <cmath>
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/numeric_diff_options.h"
#include "glog/logging.h"
namespace ceres {
template <typename CostFunctor, NumericDiffMethodType method = CENTRAL>
class DynamicNumericDiffCostFunction : public CostFunction {
class DynamicNumericDiffCostFunction : public DynamicCostFunction {
public:
explicit DynamicNumericDiffCostFunction(
const CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor),
ownership_(ownership),
options_(options) {
}
: functor_(functor), ownership_(ownership), options_(options) {}
virtual ~DynamicNumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
@@ -91,28 +90,22 @@ class DynamicNumericDiffCostFunction : public CostFunction {
}
}
void AddParameterBlock(int size) {
mutable_parameter_block_sizes()->push_back(size);
}
void SetNumResiduals(int num_residuals) {
set_num_residuals(num_residuals);
}
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override {
using internal::NumericDiff;
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
const std::vector<int32>& block_sizes = parameter_block_sizes();
const std::vector<int32_t>& block_sizes = parameter_block_sizes();
CHECK(!block_sizes.empty())
<< "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
const bool status = EvaluateCostFunctor(parameters, residuals);
const bool status =
internal::VariadicEvaluate<internal::DynamicParameterDims>(
*functor_.get(), parameters, residuals);
if (jacobians == NULL || !status) {
return status;
}
@@ -123,8 +116,8 @@ class DynamicNumericDiffCostFunction : public CostFunction {
std::vector<double*> parameters_references_copy(block_sizes.size());
parameters_references_copy[0] = &parameters_copy[0];
for (size_t block = 1; block < block_sizes.size(); ++block) {
parameters_references_copy[block] = parameters_references_copy[block - 1]
+ block_sizes[block - 1];
parameters_references_copy[block] =
parameters_references_copy[block - 1] + block_sizes[block - 1];
}
// Copy the parameters into the local temp space.
@@ -136,18 +129,20 @@ class DynamicNumericDiffCostFunction : public CostFunction {
for (size_t block = 0; block < block_sizes.size(); ++block) {
if (jacobians[block] != NULL &&
!NumericDiff<CostFunctor, method, DYNAMIC,
DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
DYNAMIC, DYNAMIC>::EvaluateJacobianForParameterBlock(
functor_.get(),
residuals,
options_,
this->num_residuals(),
block,
block_sizes[block],
&parameters_references_copy[0],
jacobians[block])) {
!NumericDiff<CostFunctor,
method,
ceres::DYNAMIC,
internal::DynamicParameterDims,
ceres::DYNAMIC,
ceres::DYNAMIC>::
EvaluateJacobianForParameterBlock(functor_.get(),
residuals,
options_,
this->num_residuals(),
block,
block_sizes[block],
&parameters_references_copy[0],
jacobians[block])) {
return false;
}
}
@@ -155,31 +150,7 @@ class DynamicNumericDiffCostFunction : public CostFunction {
}
private:
bool EvaluateCostFunctor(double const* const* parameters,
double* residuals) const {
return EvaluateCostFunctorImpl(functor_.get(),
parameters,
residuals,
functor_.get());
}
// Helper templates to allow evaluation of a functor or a
// CostFunction.
bool EvaluateCostFunctorImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const void* /* NOT USED */) const {
return (*functor)(parameters, residuals);
}
bool EvaluateCostFunctorImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const CostFunction* /* NOT USED */) const {
return functor->Evaluate(parameters, residuals, NULL);
}
internal::scoped_ptr<const CostFunctor> functor_;
std::unique_ptr<const CostFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};

View File

@@ -0,0 +1,80 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef CERES_PUBLIC_EVALUATION_CALLBACK_H_
#define CERES_PUBLIC_EVALUATION_CALLBACK_H_
#include "ceres/internal/port.h"
namespace ceres {
// Using this callback interface, Ceres can notify you when it is
// about to evaluate the residuals or jacobians. With the callback,
// you can share computation between residual blocks by doing the
// shared computation in PrepareForEvaluation() before Ceres calls
// CostFunction::Evaluate(). It also enables caching results between a
// pure residual evaluation and a residual & jacobian evaluation, via
// the new_evaluation_point argument.
//
// One use case for this callback is if the cost function compute is
// moved to the GPU. In that case, the prepare call does the actual
// cost function evaluation, and subsequent calls from Ceres to the
// actual cost functions merely copy the results from the GPU onto the
// corresponding blocks for Ceres to plug into the solver.
//
// NOTE: Ceres provides no mechanism to share data other than the
// notification from the callback. Users must provide access to
// pre-computed shared data to their cost functions behind the scenes;
// this all happens without Ceres knowing.
//
// One approach is to put a pointer to the shared data in each cost
// function (recommended) or to use a global shared variable
// (discouraged; bug-prone). As far as Ceres is concerned, it is
// evaluating cost functions like any other; it just so happens that
// behind the scenes the cost functions reuse pre-computed data to
// execute faster.
class CERES_EXPORT EvaluationCallback {
 public:
  virtual ~EvaluationCallback() {}

  // Called before Ceres requests residuals or jacobians for a given setting of
  // the parameters. User parameters (the double* values provided to the cost
  // functions) are fixed until the next call to PrepareForEvaluation(). If
  // new_evaluation_point == true, then this is a new point that is different
  // from the last evaluated point. Otherwise, it is the same point that was
  // evaluated previously (either jacobian or residual) and the user can use
  // cached results from previous evaluations.
  //
  // evaluate_jacobians indicates whether jacobians (and not just
  // residuals) will be requested for this evaluation point.
  virtual void PrepareForEvaluation(bool evaluate_jacobians,
                                    bool new_evaluation_point) = 0;
};
} // namespace ceres
#endif // CERES_PUBLIC_EVALUATION_CALLBACK_H_

View File

@@ -0,0 +1,54 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
#define CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
#include "ceres/internal/port.h"
namespace ceres {
// A FirstOrderFunction object implements the evaluation of a function
// and its gradient.
class CERES_EXPORT FirstOrderFunction {
 public:
  virtual ~FirstOrderFunction() {}

  // Evaluates the function value (and optionally its gradient) at
  // 'parameters'. cost is never null. gradient may be null, in which
  // case only the cost needs to be computed. The return value
  // indicates whether the evaluation was successful or not.
  virtual bool Evaluate(const double* const parameters,
                        double* cost,
                        double* gradient) const = 0;

  // Number of entries in the parameters array (and, when non-null, in
  // the gradient array) passed to Evaluate().
  virtual int NumParameters() const = 0;
};
} // namespace ceres
#endif // CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,14 @@
#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_
#define CERES_PUBLIC_GRADIENT_CHECKER_H_
#include <vector>
#include <memory>
#include <string>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
#include "glog/logging.h"
@@ -64,13 +63,13 @@ namespace ceres {
//
// How to use: Fill in an array of pointers to parameter blocks for your
// CostFunction, and then call Probe(). Check that the return value is 'true'.
class GradientChecker {
class CERES_EXPORT GradientChecker {
public:
// This will not take ownership of the cost function or local
// parameterizations.
//
// function: The cost function to probe.
// local_parameterization: A vector of local parameterizations for each
// local_parameterizations: A vector of local parameterizations for each
// parameter. May be NULL or contain NULL pointers to indicate that the
// respective parameter does not have a local parameterization.
// options: Options to use for numerical differentiation.
@@ -80,7 +79,7 @@ class GradientChecker {
const NumericDiffOptions& options);
// Contains results from a call to Probe for later inspection.
struct ProbeResults {
struct CERES_EXPORT ProbeResults {
// The return value of the cost function.
bool return_value;
@@ -100,10 +99,10 @@ class GradientChecker {
// Derivatives as computed by the cost function in local space.
std::vector<Matrix> local_jacobians;
// Derivatives as computed by nuerical differentiation in local space.
// Derivatives as computed by numerical differentiation in local space.
std::vector<Matrix> numeric_jacobians;
// Derivatives as computed by nuerical differentiation in local space.
// Derivatives as computed by numerical differentiation in local space.
std::vector<Matrix> local_numeric_jacobians;
// Contains the maximum relative error found in the local Jacobians.
@@ -137,11 +136,13 @@ class GradientChecker {
ProbeResults* results) const;
private:
CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker);
GradientChecker() = delete;
GradientChecker(const GradientChecker&) = delete;
void operator=(const GradientChecker&) = delete;
std::vector<const LocalParameterization*> local_parameterizations_;
const CostFunction* function_;
internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
std::unique_ptr<CostFunction> finite_diff_cost_function_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,10 @@
#ifndef CERES_PUBLIC_GRADIENT_PROBLEM_H_
#define CERES_PUBLIC_GRADIENT_PROBLEM_H_
#include "ceres/internal/macros.h"
#include <memory>
#include "ceres/first_order_function.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
namespace ceres {
@@ -105,21 +106,9 @@ class CERES_EXPORT GradientProblem {
bool Plus(const double* x, const double* delta, double* x_plus_delta) const;
private:
internal::scoped_ptr<FirstOrderFunction> function_;
internal::scoped_ptr<LocalParameterization> parameterization_;
internal::scoped_array<double> scratch_;
};
// A FirstOrderFunction object implements the evaluation of a function
// and its gradient.
class CERES_EXPORT FirstOrderFunction {
public:
virtual ~FirstOrderFunction() {}
// cost is never NULL. gradient may be null.
virtual bool Evaluate(const double* const parameters,
double* cost,
double* gradient) const = 0;
virtual int NumParameters() const = 0;
std::unique_ptr<FirstOrderFunction> function_;
std::unique_ptr<LocalParameterization> parameterization_;
std::unique_ptr<double[]> scratch_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,11 +34,11 @@
#include <cmath>
#include <string>
#include <vector>
#include "ceres/internal/macros.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -54,39 +54,15 @@ class CERES_EXPORT GradientProblemSolver {
//
// The constants are defined inside types.h
struct CERES_EXPORT Options {
// Default constructor that sets up a generic sparse problem.
Options() {
line_search_direction_type = LBFGS;
line_search_type = WOLFE;
nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
max_lbfgs_rank = 20;
use_approximate_eigenvalue_bfgs_scaling = false;
line_search_interpolation_type = CUBIC;
min_line_search_step_size = 1e-9;
line_search_sufficient_function_decrease = 1e-4;
max_line_search_step_contraction = 1e-3;
min_line_search_step_contraction = 0.6;
max_num_line_search_step_size_iterations = 20;
max_num_line_search_direction_restarts = 5;
line_search_sufficient_curvature_decrease = 0.9;
max_line_search_step_expansion = 10.0;
max_num_iterations = 50;
max_solver_time_in_seconds = 1e9;
function_tolerance = 1e-6;
gradient_tolerance = 1e-10;
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
}
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
bool IsValid(std::string* error) const;
// Minimizer options ----------------------------------------
LineSearchDirectionType line_search_direction_type;
LineSearchType line_search_type;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
LineSearchDirectionType line_search_direction_type = LBFGS;
LineSearchType line_search_type = WOLFE;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
// The LBFGS hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
@@ -111,8 +87,8 @@ class CERES_EXPORT GradientProblemSolver {
// method, please see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
// Limited Storage". Mathematics of Computation 35 (151): 773782.
int max_lbfgs_rank;
// Limited Storage". Mathematics of Computation 35 (151): 773-782.
int max_lbfgs_rank = 20;
// As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS),
// the initial inverse Hessian approximation is taken to be the Identity.
@@ -134,18 +110,18 @@ class CERES_EXPORT GradientProblemSolver {
// Oren S.S., Self-scaling variable metric (SSVM) algorithms
// Part II: Implementation and experiments, Management Science,
// 20(5), 863-874, 1974.
bool use_approximate_eigenvalue_bfgs_scaling;
bool use_approximate_eigenvalue_bfgs_scaling = false;
// Degree of the polynomial used to approximate the objective
// function. Valid values are BISECTION, QUADRATIC and CUBIC.
//
// BISECTION corresponds to pure backtracking search with no
// interpolation.
LineSearchInterpolationType line_search_interpolation_type;
LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If during the line search, the step_size falls below this
// value, it is truncated to zero.
double min_line_search_step_size;
double min_line_search_step_size = 1e-9;
// Line search parameters.
@@ -159,7 +135,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
//
double line_search_sufficient_function_decrease;
double line_search_sufficient_function_decrease = 1e-4;
// In each iteration of the line search,
//
@@ -169,7 +145,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
double max_line_search_step_contraction;
double max_line_search_step_contraction = 1e-3;
// In each iteration of the line search,
//
@@ -179,19 +155,19 @@ class CERES_EXPORT GradientProblemSolver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
double min_line_search_step_contraction;
double min_line_search_step_contraction = 0.6;
// Maximum number of trial step size iterations during each line search,
// if a step size satisfying the search conditions cannot be found within
// this number of trials, the line search will terminate.
int max_num_line_search_step_size_iterations;
int max_num_line_search_step_size_iterations = 20;
// Maximum number of restarts of the line search direction algorithm before
// terminating the optimization. Restarts of the line search direction
// algorithm occur when the current algorithm fails to produce a new descent
// direction. This typically indicates a numerical failure, or a breakdown
// in the validity of the approximations used.
int max_num_line_search_direction_restarts;
int max_num_line_search_direction_restarts = 5;
// The strong Wolfe conditions consist of the Armijo sufficient
// decrease condition, and an additional requirement that the
@@ -204,7 +180,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// Where f() is the line search objective and f'() is the derivative
// of f w.r.t step_size (d f / d step_size).
double line_search_sufficient_curvature_decrease;
double line_search_sufficient_curvature_decrease = 0.9;
// During the bracketing phase of the Wolfe search, the step size is
// increased until either a point satisfying the Wolfe conditions is
@@ -215,36 +191,49 @@ class CERES_EXPORT GradientProblemSolver {
// new_step_size <= max_step_expansion * step_size.
//
// By definition for expansion, max_step_expansion > 1.0.
double max_line_search_step_expansion;
double max_line_search_step_expansion = 10.0;
// Maximum number of iterations for the minimizer to run for.
int max_num_iterations;
int max_num_iterations = 50;
// Maximum time for which the minimizer should run for.
double max_solver_time_in_seconds;
double max_solver_time_in_seconds = 1e9;
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
//
double function_tolerance;
double function_tolerance = 1e-6;
// Minimizer terminates when
//
// max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
double gradient_tolerance;
double gradient_tolerance = 1e-10;
// Minimizer terminates when
//
// |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
//
double parameter_tolerance = 1e-8;
// Logging options ---------------------------------------------------------
LoggingType logging_type;
LoggingType logging_type = PER_MINIMIZER_ITERATION;
// By default the Minimizer progress is logged to VLOG(1), which
// is sent to STDERR depending on the vlog level. If this flag is
// set to true, and logging_type is not SILENT, the logging output
// is sent to STDOUT.
bool minimizer_progress_to_stdout;
bool minimizer_progress_to_stdout = false;
// If true, the user's parameter blocks are updated at the end of
// every Minimizer iteration, otherwise they are updated when the
// Minimizer terminates. This is useful if, for example, the user
// wishes to visualize the state of the optimization every
// iteration.
bool update_state_every_iteration = false;
// Callbacks that are executed at the end of each iteration of the
// Minimizer. An iteration may terminate midway, either due to
@@ -265,8 +254,6 @@ class CERES_EXPORT GradientProblemSolver {
};
struct CERES_EXPORT Summary {
Summary();
// A brief one line description of the state of the solver after
// termination.
std::string BriefReport() const;
@@ -278,65 +265,72 @@ class CERES_EXPORT GradientProblemSolver {
bool IsSolutionUsable() const;
// Minimizer summary -------------------------------------------------
TerminationType termination_type;
TerminationType termination_type = FAILURE;
// Reason why the solver terminated.
std::string message;
std::string message = "ceres::GradientProblemSolve was not called.";
// Cost of the problem (value of the objective function) before
// the optimization.
double initial_cost;
double initial_cost = -1.0;
// Cost of the problem (value of the objective function) after the
// optimization.
double final_cost;
double final_cost = -1.0;
// IterationSummary for each minimizer iteration in order.
std::vector<IterationSummary> iterations;
// Number of times the cost (and not the gradient) was evaluated.
int num_cost_evaluations = -1;
// Number of times the gradient (and the cost) were evaluated.
int num_gradient_evaluations = -1;
// Sum total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
double total_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the cost.
double cost_evaluation_time_in_seconds;
double cost_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the gradient.
double gradient_evaluation_time_in_seconds;
double gradient_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
double line_search_polynomial_minimization_time_in_seconds;
double line_search_polynomial_minimization_time_in_seconds = -1.0;
// Number of parameters in the probem.
int num_parameters;
// Number of parameters in the problem.
int num_parameters = -1;
// Dimension of the tangent space of the problem.
int num_local_parameters;
int num_local_parameters = -1;
// Type of line search direction used.
LineSearchDirectionType line_search_direction_type;
LineSearchDirectionType line_search_direction_type = LBFGS;
// Type of the line search algorithm used.
LineSearchType line_search_type;
LineSearchType line_search_type = WOLFE;
// When performing line search, the degree of the polynomial used
// to approximate the objective function.
LineSearchInterpolationType line_search_interpolation_type;
LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
// then this indicates the particular variant of non-linear
// conjugate gradient used.
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
FLETCHER_REEVES;
// If the type of the line search direction is LBFGS, then this
// indicates the rank of the Hessian approximation.
int max_lbfgs_rank;
int max_lbfgs_rank = -1;
};
// Once a least squares problem has been built, this function takes
// the problem and optimizes it based on the values of the options
// parameters. Upon return, a detailed summary of the work performed
// by the preprocessor, the non-linear minmizer and the linear
// by the preprocessor, the non-linear minimizer and the linear
// solver are reported in the summary object.
virtual void Solve(const GradientProblemSolver::Options& options,
const GradientProblem& problem,

View File

@@ -0,0 +1,95 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2020 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: darius.rueckert@fau.de (Darius Rueckert)
//
#ifndef CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_
#define CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_
#include <array>
#include <vector>
#include "ceres/internal/fixed_array.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
// StaticFixedArray selects the best array implementation based on template
// arguments. If the size is not known at compile-time, pass
// ceres::DYNAMIC as a size-template argument.
//
// Three different containers are selected in different scenarios:
//
// num_elements == DYNAMIC:
// -> ceres::internal::FixedArray<T, max_stack_size>(size)
// num_elements != DYNAMIC && num_elements <= max_stack_size
// -> std::array<T,num_elements>
// num_elements != DYNAMIC && num_elements > max_stack_size
// -> std::vector<T>(num_elements)
//
// Primary template. Never instantiated directly: the two trailing boolean
// template arguments are computed from their defaults, so one of the three
// partial specializations below is always selected instead.
template <typename T,
          int num_elements,
          int max_num_elements_on_stack,
          bool dynamic = (num_elements == DYNAMIC),
          bool fits_on_stack = (num_elements <= max_num_elements_on_stack)>
struct ArraySelector {};
// Specialization for a size that is only known at run time
// (num_elements == ceres::DYNAMIC): delegate storage to FixedArray, which
// keeps up to max_num_elements_on_stack elements inline and heap-allocates
// anything larger.
template <typename T, int num_elements, int max_num_elements_on_stack,
          bool fits_on_stack>
struct ArraySelector<T, num_elements, max_num_elements_on_stack, true,
                     fits_on_stack>
    : ceres::internal::FixedArray<T, max_num_elements_on_stack> {
  ArraySelector(int size)
      : ceres::internal::FixedArray<T, max_num_elements_on_stack>(size) {}
};
// Specialization for a compile-time size that fits on the stack: a thin
// wrapper around std::array. The run-time size argument is redundant here and
// is only validated against the compile-time element count.
template <typename T, int num_elements, int max_num_elements_on_stack>
struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, true>
    : std::array<T, num_elements> {
  ArraySelector(int size) { CHECK_EQ(size, num_elements); }
};
// Specialization for a compile-time size that is too large for the stack:
// fall back to a heap-allocated std::vector. The run-time size must agree
// with the compile-time size.
template <typename T, int num_elements, int max_num_elements_on_stack>
struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, false>
    : std::vector<T> {
  ArraySelector(int size) : std::vector<T>(size) {
    CHECK_EQ(size, num_elements);
  }
};
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,10 +30,10 @@
//
// Computation of the Jacobian matrix for vector-valued functions of multiple
// variables, using automatic differentiation based on the implementation of
// dual numbers in jet.h. Before reading the rest of this file, it is adivsable
// dual numbers in jet.h. Before reading the rest of this file, it is advisable
// to read jet.h's header comment in detail.
//
// The helper wrapper AutoDiff::Differentiate() computes the jacobian of
// The helper wrapper AutoDifferentiate() computes the jacobian of
// functors with templated operator() taking this form:
//
// struct F {
@@ -57,7 +57,7 @@
// [ * ]
//
// Similar to the 2-parameter example for f described in jet.h, computing the
// jacobian dy/dx is done by substutiting a suitable jet object for x and all
// jacobian dy/dx is done by substituting a suitable jet object for x and all
// intermediate steps of the computation of F. Since x is has 4 dimensions, use
// a Jet<double, 4>.
//
@@ -142,16 +142,33 @@
#include <stddef.h>
#include "ceres/jet.h"
#include <array>
#include <utility>
#include "ceres/internal/array_selector.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/jet.h"
#include "ceres/types.h"
#include "glog/logging.h"
// If the number of parameters exceeds this value, the corresponding jets are
// placed on the heap. This will reduce performance by a factor of 2-5 on
// current compilers.
#ifndef CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK
#define CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK 50
#endif
#ifndef CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK
#define CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK 20
#endif
namespace ceres {
namespace internal {
// Extends src by a 1st order pertubation for every dimension and puts it in
// Extends src by a 1st order perturbation for every dimension and puts it in
// dst. The size of src is N. Since this is also used for perturbations in
// blocked arrays, offset is used to shift which part of the jet the
// perturbation occurs. This is used to set up the extended x augmented by an
@@ -165,21 +182,62 @@ namespace internal {
//
// is what would get put in dst if N was 3, offset was 3, and the jet type JetT
// was 8-dimensional.
template <typename JetT, typename T, int N>
inline void Make1stOrderPerturbation(int offset, const T* src, JetT* dst) {
DCHECK(src);
DCHECK(dst);
for (int j = 0; j < N; ++j) {
dst[j].a = src[j];
dst[j].v.setZero();
dst[j].v[offset + j] = T(1.0);
template <int j, int N, int Offset, typename T, typename JetT>
struct Make1stOrderPerturbation {
public:
inline static void Apply(const T* src, JetT* dst) {
if (j == 0) {
DCHECK(src);
DCHECK(dst);
}
dst[j] = JetT(src[j], j + Offset);
Make1stOrderPerturbation<j + 1, N, Offset, T, JetT>::Apply(src, dst);
}
}
};
// Base case of the compile-time recursion: j == N means every element of the
// parameter block has been perturbed, so there is nothing left to do.
template <int N, int Offset, typename T, typename JetT>
struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
 public:
  static void Apply(const T* /*src*/, JetT* /*dst*/) {}
};
// Calls Make1stOrderPerturbation for every parameter block.
//
// Example:
// If one having three parameter blocks with dimensions (3, 2, 4), the call
// Make1stOrderPerturbations<std::integer_sequence<int, 3, 2, 4>>::Apply(params, x);
// will result in the following calls to Make1stOrderPerturbation:
// Make1stOrderPerturbation<0, 3, 0>::Apply(params[0], x + 0);
// Make1stOrderPerturbation<0, 2, 3>::Apply(params[1], x + 3);
// Make1stOrderPerturbation<0, 4, 5>::Apply(params[2], x + 5);
// Forward declaration; the partial specializations below do the work.
template <typename Seq, int ParameterIdx = 0, int Offset = 0>
struct Make1stOrderPerturbations;

// Recursive case: perturb the parameter block at index ParameterIdx (of size
// N, occupying jet dimensions [Offset, Offset + N)), then recurse on the
// remaining block sizes Ns... with the block index and jet offset advanced.
template <int N, int... Ns, int ParameterIdx, int Offset>
struct Make1stOrderPerturbations<std::integer_sequence<int, N, Ns...>,
                                 ParameterIdx,
                                 Offset> {
  template <typename T, typename JetT>
  inline static void Apply(T const* const* parameters, JetT* x) {
    Make1stOrderPerturbation<0, N, Offset, T, JetT>::Apply(
        parameters[ParameterIdx], x + Offset);
    Make1stOrderPerturbations<std::integer_sequence<int, Ns...>,
                              ParameterIdx + 1,
                              Offset + N>::Apply(parameters, x);
  }
};
// End of 'recursion': the integer sequence of block sizes is exhausted, so
// every parameter block has been perturbed. Nothing more to do.
template <int ParameterIdx, int Total>
struct Make1stOrderPerturbations<std::integer_sequence<int>,
                                 ParameterIdx,
                                 Total> {
  template <typename T, typename JetT>
  static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {}
};
// Takes the 0th order part of src, assumed to be a Jet type, and puts it in
// dst. This is used to pick out the "vector" part of the extended y.
template <typename JetT, typename T>
inline void Take0thOrderPart(int M, const JetT *src, T dst) {
inline void Take0thOrderPart(int M, const JetT* src, T dst) {
DCHECK(src);
for (int i = 0; i < M; ++i) {
dst[i] = src[i].a;
@@ -188,129 +246,118 @@ inline void Take0thOrderPart(int M, const JetT *src, T dst) {
// Takes N 1st order parts, starting at index N0, and puts them in the M x N
// matrix 'dst'. This is used to pick out the "matrix" parts of the extended y.
template <typename JetT, typename T, int N0, int N>
inline void Take1stOrderPart(const int M, const JetT *src, T *dst) {
template <int N0, int N, typename JetT, typename T>
inline void Take1stOrderPart(const int M, const JetT* src, T* dst) {
DCHECK(src);
DCHECK(dst);
for (int i = 0; i < M; ++i) {
Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) =
Eigen::Map<Eigen::Matrix<T, N, 1>>(dst + N * i, N) =
src[i].v.template segment<N>(N0);
}
}
// This is in a struct because default template parameters on a
// function are not supported in C++03 (though it is available in
// C++0x). N0 through N5 are the dimension of the input arguments to
// the user supplied functor.
template <typename Functor, typename T,
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
struct AutoDiff {
static bool Differentiate(const Functor& functor,
T const *const *parameters,
int num_outputs,
T *function_value,
T **jacobians) {
// This block breaks the 80 column rule to keep it somewhat readable.
DCHECK_GT(num_outputs, 0);
DCHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
// Calls Take1stOrderPart for every parameter block.
//
// Example:
// If one having three parameter blocks with dimensions (3, 2, 4), the call
// Take1stOrderParts<std::integer_sequence<int, 3, 2, 4>>::Apply(num_outputs,
// output,
// jacobians);
// will result in the following calls to Take1stOrderPart:
// if (jacobians[0]) {
// Take1stOrderPart<0, 3>(num_outputs, output, jacobians[0]);
// }
// if (jacobians[1]) {
// Take1stOrderPart<3, 2>(num_outputs, output, jacobians[1]);
// }
// if (jacobians[2]) {
// Take1stOrderPart<5, 4>(num_outputs, output, jacobians[2]);
// }
template <typename Seq, int ParameterIdx = 0, int Offset = 0>
struct Take1stOrderParts;
typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9> JetT;
FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(
N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9 + num_outputs);
// These are the positions of the respective jets in the fixed array x.
const int jet0 = 0;
const int jet1 = N0;
const int jet2 = N0 + N1;
const int jet3 = N0 + N1 + N2;
const int jet4 = N0 + N1 + N2 + N3;
const int jet5 = N0 + N1 + N2 + N3 + N4;
const int jet6 = N0 + N1 + N2 + N3 + N4 + N5;
const int jet7 = N0 + N1 + N2 + N3 + N4 + N5 + N6;
const int jet8 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7;
const int jet9 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8;
const JetT *unpacked_parameters[10] = {
x.get() + jet0,
x.get() + jet1,
x.get() + jet2,
x.get() + jet3,
x.get() + jet4,
x.get() + jet5,
x.get() + jet6,
x.get() + jet7,
x.get() + jet8,
x.get() + jet9,
};
JetT* output = x.get() + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
#define CERES_MAKE_1ST_ORDER_PERTURBATION(i) \
if (N ## i) { \
internal::Make1stOrderPerturbation<JetT, T, N ## i>( \
jet ## i, \
parameters[i], \
x.get() + jet ## i); \
template <int N, int... Ns, int ParameterIdx, int Offset>
struct Take1stOrderParts<std::integer_sequence<int, N, Ns...>,
ParameterIdx,
Offset> {
template <typename JetT, typename T>
inline static void Apply(int num_outputs, JetT* output, T** jacobians) {
if (jacobians[ParameterIdx]) {
Take1stOrderPart<Offset, N>(num_outputs, output, jacobians[ParameterIdx]);
}
CERES_MAKE_1ST_ORDER_PERTURBATION(0);
CERES_MAKE_1ST_ORDER_PERTURBATION(1);
CERES_MAKE_1ST_ORDER_PERTURBATION(2);
CERES_MAKE_1ST_ORDER_PERTURBATION(3);
CERES_MAKE_1ST_ORDER_PERTURBATION(4);
CERES_MAKE_1ST_ORDER_PERTURBATION(5);
CERES_MAKE_1ST_ORDER_PERTURBATION(6);
CERES_MAKE_1ST_ORDER_PERTURBATION(7);
CERES_MAKE_1ST_ORDER_PERTURBATION(8);
CERES_MAKE_1ST_ORDER_PERTURBATION(9);
#undef CERES_MAKE_1ST_ORDER_PERTURBATION
if (!VariadicEvaluate<Functor, JetT,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
functor, unpacked_parameters, output)) {
return false;
}
internal::Take0thOrderPart(num_outputs, output, function_value);
#define CERES_TAKE_1ST_ORDER_PERTURBATION(i) \
if (N ## i) { \
if (jacobians[i]) { \
internal::Take1stOrderPart<JetT, T, \
jet ## i, \
N ## i>(num_outputs, \
output, \
jacobians[i]); \
} \
}
CERES_TAKE_1ST_ORDER_PERTURBATION(0);
CERES_TAKE_1ST_ORDER_PERTURBATION(1);
CERES_TAKE_1ST_ORDER_PERTURBATION(2);
CERES_TAKE_1ST_ORDER_PERTURBATION(3);
CERES_TAKE_1ST_ORDER_PERTURBATION(4);
CERES_TAKE_1ST_ORDER_PERTURBATION(5);
CERES_TAKE_1ST_ORDER_PERTURBATION(6);
CERES_TAKE_1ST_ORDER_PERTURBATION(7);
CERES_TAKE_1ST_ORDER_PERTURBATION(8);
CERES_TAKE_1ST_ORDER_PERTURBATION(9);
#undef CERES_TAKE_1ST_ORDER_PERTURBATION
return true;
Take1stOrderParts<std::integer_sequence<int, Ns...>,
ParameterIdx + 1,
Offset + N>::Apply(num_outputs, output, jacobians);
}
};
// End of 'recursion': the integer sequence of block sizes is exhausted, so
// the Jacobian of every parameter block has been extracted. Nothing more to
// do.
template <int ParameterIdx, int Offset>
struct Take1stOrderParts<std::integer_sequence<int>, ParameterIdx, Offset> {
  template <typename T, typename JetT>
  static void Apply(int /* NOT USED*/,
                    JetT* /* NOT USED*/,
                    T** /* NOT USED */) {}
};
// Evaluates the functor and its Jacobians via dual numbers (jet.h).
//
// Template parameters:
//   kNumResiduals  - number of residuals, or ceres::DYNAMIC if only known at
//                    run time (then dynamic_num_outputs is used instead).
//   ParameterDims  - compile-time description of the parameter block sizes.
//
// Arguments:
//   functor             - user functor with a templated operator().
//   parameters          - array of pointers, one per parameter block.
//   dynamic_num_outputs - run-time residual count; must match kNumResiduals
//                         when the latter is not DYNAMIC.
//   function_value      - output: the residual values (0th order jet parts).
//   jacobians           - output: per-block Jacobians; a NULL entry skips
//                         that block.
//
// Returns false if the functor evaluation fails, true otherwise.
template <int kNumResiduals,
          typename ParameterDims,
          typename Functor,
          typename T>
inline bool AutoDifferentiate(const Functor& functor,
                              T const* const* parameters,
                              int dynamic_num_outputs,
                              T* function_value,
                              T** jacobians) {
  // One jet dimension per scalar parameter across all blocks.
  typedef Jet<T, ParameterDims::kNumParameters> JetT;
  using Parameters = typename ParameterDims::Parameters;

  if (kNumResiduals != DYNAMIC) {
    DCHECK_EQ(kNumResiduals, dynamic_num_outputs);
  }

  // Jet storage for the parameters; lives on the stack unless the parameter
  // count exceeds CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK.
  ArraySelector<JetT,
                ParameterDims::kNumParameters,
                CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK>
      parameters_as_jets(ParameterDims::kNumParameters);

  // Pointers to the beginning of each parameter block
  std::array<JetT*, ParameterDims::kNumParameterBlocks> unpacked_parameters =
      ParameterDims::GetUnpackedParameters(parameters_as_jets.data());

  // If the number of residuals is fixed, we use the template argument as the
  // number of outputs. Otherwise we use the num_outputs parameter. Note: The
  // ?-operator here is compile-time evaluated, therefore num_outputs is also
  // a compile-time constant for functors with fixed residuals.
  const int num_outputs =
      kNumResiduals == DYNAMIC ? dynamic_num_outputs : kNumResiduals;
  DCHECK_GT(num_outputs, 0);

  // Jet storage for the residuals; stack-allocated up to
  // CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK outputs.
  ArraySelector<JetT, kNumResiduals, CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK>
      residuals_as_jets(num_outputs);

  // Invalidate the output Jets, so that we can detect if the user
  // did not assign values to all of them.
  for (int i = 0; i < num_outputs; ++i) {
    residuals_as_jets[i].a = kImpossibleValue;
    residuals_as_jets[i].v.setConstant(kImpossibleValue);
  }

  // Seed each parameter jet with a unit perturbation in its own dimension.
  Make1stOrderPerturbations<Parameters>::Apply(parameters,
                                               parameters_as_jets.data());

  if (!VariadicEvaluate<ParameterDims>(
          functor, unpacked_parameters.data(), residuals_as_jets.data())) {
    return false;
  }

  // Residual values are the scalar parts; Jacobians are the dual parts.
  Take0thOrderPart(num_outputs, residuals_as_jets.data(), function_value);
  Take1stOrderParts<Parameters>::Apply(
      num_outputs, residuals_as_jets.data(), jacobians);

  return true;
}
} // namespace internal
} // namespace ceres

View File

@@ -35,7 +35,7 @@
#ifdef _MSC_VER
#pragma warning( push )
// Disable the warning C4251 which is trigerred by stl classes in
// Disable the warning C4251 which is triggered by stl classes in
// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
// "if you are deriving from a type in the Standard C++ Library"
#pragma warning( disable : 4251 )

View File

@@ -52,40 +52,27 @@ typedef Eigen::Matrix<double,
Eigen::ColMajor> ColMajorMatrix;
typedef Eigen::Map<ColMajorMatrix, 0,
Eigen::Stride<Eigen::Dynamic, 1> > ColMajorMatrixRef;
Eigen::Stride<Eigen::Dynamic, 1>> ColMajorMatrixRef;
typedef Eigen::Map<const ColMajorMatrix,
0,
Eigen::Stride<Eigen::Dynamic, 1> > ConstColMajorMatrixRef;
Eigen::Stride<Eigen::Dynamic, 1>> ConstColMajorMatrixRef;
// C++ does not support templated typedefs, thus the need for this
// struct so that we can support statically sized Matrix and Maps.
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
struct EigenTypes {
typedef Eigen::Matrix <double, num_rows, num_cols, Eigen::RowMajor>
Matrix;
typedef Eigen::Matrix<double,
num_rows,
num_cols,
num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor>
Matrix;
typedef Eigen::Map<
Eigen::Matrix<double, num_rows, num_cols, Eigen::RowMajor> >
MatrixRef;
typedef Eigen::Matrix <double, num_rows, 1>
Vector;
typedef Eigen::Map <
Eigen::Matrix<double, num_rows, 1> >
VectorRef;
typedef Eigen::Map<
const Eigen::Matrix<double, num_rows, num_cols, Eigen::RowMajor> >
ConstMatrixRef;
typedef Eigen::Map <
const Eigen::Matrix<double, num_rows, 1> >
ConstVectorRef;
typedef Eigen::Map<Matrix> MatrixRef;
typedef Eigen::Map<const Matrix> ConstMatrixRef;
typedef Eigen::Matrix<double, num_rows, 1> Vector;
typedef Eigen::Map<Eigen::Matrix<double, num_rows, 1>> VectorRef;
typedef Eigen::Map<const Eigen::Matrix<double, num_rows, 1>> ConstVectorRef;
};
} // namespace ceres

View File

@@ -1,189 +1,466 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
// Copyright 2018 The Abseil Authors.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
// https://www.apache.org/licenses/LICENSE-2.0
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: rennie@google.com (Jeffrey Rennie)
// Author: sanjay@google.com (Sanjay Ghemawat) -- renamed to FixedArray
// -----------------------------------------------------------------------------
// File: fixed_array.h
// -----------------------------------------------------------------------------
//
// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
// the array can be determined at run-time. It is a good replacement for
// non-standard and deprecated uses of `alloca()` and variable length arrays
// within the GCC extension. (See
// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
//
// `FixedArray` allocates small arrays inline, keeping performance fast by
// avoiding heap operations. It also helps reduce the chances of
// accidentally overflowing your stack if large input is passed to
// your function.
#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
#include <algorithm>
#include <array>
#include <cstddef>
#include "Eigen/Core"
#include "ceres/internal/macros.h"
#include "ceres/internal/manual_constructor.h"
#include <memory>
#include <tuple>
#include <type_traits>
#include <Eigen/Core> // For Eigen::aligned_allocator
#include "ceres/internal/memory.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
// A FixedArray<T> represents a non-resizable array of T where the
// length of the array does not need to be a compile time constant.
//
// FixedArray allocates small arrays inline, and large arrays on
// the heap. It is a good replacement for non-standard and deprecated
// uses of alloca() and variable length arrays (a GCC extension).
//
// FixedArray keeps performance fast for small arrays, because it
// avoids heap operations. It also helps reduce the chances of
// accidentally overflowing your stack if large input is passed to
// your function.
//
// Also, FixedArray is useful for writing portable code. Not all
// compilers support arrays of dynamic size.
constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// Most users should not specify an inline_elements argument and let
// FixedArray<> automatically determine the number of elements
// to store inline based on sizeof(T).
// The default fixed array allocator.
//
// If inline_elements is specified, the FixedArray<> implementation
// will store arrays of length <= inline_elements inline.
//
// Finally note that unlike vector<T> FixedArray<T> will not zero-initialize
// simple types like int, double, bool, etc.
//
// Non-POD types will be default-initialized just like regular vectors or
// arrays.
// As one can not easily detect if a struct contains or inherits from a fixed
// size Eigen type, to be safe the Eigen::aligned_allocator is used by default.
// But trivial types can never contain Eigen types, so std::allocator is used to
// save some heap memory.
// Default allocator for FixedArray.
//
// As one cannot easily detect whether a struct contains or inherits from a
// fixed-size Eigen type, Eigen::aligned_allocator is used by default so that
// heap storage is always suitably aligned. Trivial types can never contain
// Eigen types, so the lighter-weight std::allocator is used for them.
template <typename T>
using FixedArrayDefaultAllocator =
    std::conditional_t<std::is_trivial<T>::value,
                       std::allocator<T>,
                       Eigen::aligned_allocator<T>>;
// MSVC does not provide the POSIX ssize_t type; define a signed integer type
// of pointer width for Windows builds so signed-size template parameters work.
#if defined(_WIN64)
typedef __int64 ssize_t;
#elif defined(_WIN32)
typedef __int32 ssize_t;
#endif
template <typename T, ssize_t inline_elements = -1>
// -----------------------------------------------------------------------------
// FixedArray
// -----------------------------------------------------------------------------
//
// A `FixedArray` provides a run-time fixed-size array, allocating a small array
// inline for efficiency.
//
// Most users should not specify an `inline_elements` argument and let
// `FixedArray` automatically determine the number of elements
// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
// `FixedArray` implementation will use inline storage for arrays with a
// length <= `inline_elements`.
//
// Note that a `FixedArray` constructed with a `size_type` argument will
// default-initialize its values by leaving trivially constructible types
// uninitialized (e.g. int, int[4], double), and others default-constructed.
// This matches the behavior of c-style arrays and `std::array`, but not
// `std::vector`.
//
// Note that `FixedArray` does not provide a public allocator; if it requires a
// heap allocation, it will do so with global `::operator new[]()` and
// `::operator delete[]()`, even if T provides class-scope overrides for these
// operators.
template <typename T,
size_t N = kFixedArrayUseDefault,
typename A = FixedArrayDefaultAllocator<T>>
class FixedArray {
static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
"Arrays with unknown bounds cannot be used with FixedArray.");
static constexpr size_t kInlineBytesDefault = 256;
using AllocatorTraits = std::allocator_traits<A>;
// std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
// but this seems to be mostly pedantic.
template <typename Iterator>
using EnableIfForwardIterator = typename std::enable_if<std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>::value>::type;
static constexpr bool DefaultConstructorIsNonTrivial() {
return !std::is_trivially_default_constructible<StorageElement>::value;
}
public:
// For playing nicely with stl:
typedef T value_type;
typedef T* iterator;
typedef T const* const_iterator;
typedef T& reference;
typedef T const& const_reference;
typedef T* pointer;
typedef std::ptrdiff_t difference_type;
typedef size_t size_type;
using allocator_type = typename AllocatorTraits::allocator_type;
using value_type = typename AllocatorTraits::value_type;
using pointer = typename AllocatorTraits::pointer;
using const_pointer = typename AllocatorTraits::const_pointer;
using reference = value_type&;
using const_reference = const value_type&;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
// REQUIRES: n >= 0
// Creates an array object that can store "n" elements.
static constexpr size_type inline_elements =
(N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
: static_cast<size_type>(N));
FixedArray(const FixedArray& other,
const allocator_type& a = allocator_type())
: FixedArray(other.begin(), other.end(), a) {}
FixedArray(FixedArray&& other, const allocator_type& a = allocator_type())
: FixedArray(std::make_move_iterator(other.begin()),
std::make_move_iterator(other.end()),
a) {}
// Creates an array object that can store `n` elements.
// Note that trivially constructible elements will be uninitialized.
explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
: storage_(n, a) {
if (DefaultConstructorIsNonTrivial()) {
ConstructRange(storage_.alloc(), storage_.begin(), storage_.end());
}
}
// Creates an array initialized with `n` copies of `val`.
FixedArray(size_type n,
const value_type& val,
const allocator_type& a = allocator_type())
: storage_(n, a) {
ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val);
}
// Creates an array initialized with the size and contents of `init_list`.
FixedArray(std::initializer_list<value_type> init_list,
const allocator_type& a = allocator_type())
: FixedArray(init_list.begin(), init_list.end(), a) {}
// Creates an array initialized with the elements from the input
// range. The array's size will always be `std::distance(first, last)`.
// REQUIRES: Iterator must be a forward_iterator or better.
template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
FixedArray(Iterator first,
Iterator last,
const allocator_type& a = allocator_type())
: storage_(std::distance(first, last), a) {
CopyRange(storage_.alloc(), storage_.begin(), first, last);
}
~FixedArray() noexcept {
for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
AllocatorTraits::destroy(storage_.alloc(), cur);
}
}
// Assignments are deleted because they break the invariant that the size of a
// `FixedArray` never changes.
void operator=(FixedArray&&) = delete;
void operator=(const FixedArray&) = delete;
// FixedArray::size()
//
// FixedArray<T> will not zero-initialize POD (simple) types like int,
// double, bool, etc.
// Non-POD types will be default-initialized just like regular vectors or
// arrays.
explicit FixedArray(size_type n);
// Returns the length of the fixed array.
size_type size() const { return storage_.size(); }
// Releases any resources.
~FixedArray();
// Returns the length of the array.
inline size_type size() const { return size_; }
// Returns the memory size of the array in bytes.
inline size_t memsize() const { return size_ * sizeof(T); }
// Returns a pointer to the underlying element array.
inline const T* get() const { return &array_[0].element; }
inline T* get() { return &array_[0].element; }
// REQUIRES: 0 <= i < size()
// Returns a reference to the "i"th element.
inline T& operator[](size_type i) {
DCHECK_LT(i, size_);
return array_[i].element;
// FixedArray::max_size()
//
// Returns the largest possible value of `std::distance(begin(), end())` for a
// `FixedArray<T>`. This is equivalent to the most possible addressable bytes
// over the number of bytes taken by T.
constexpr size_type max_size() const {
return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
}
// FixedArray::empty()
//
// Returns whether or not the fixed array is empty.
bool empty() const { return size() == 0; }
// FixedArray::memsize()
//
// Returns the memory size of the fixed array in bytes.
size_t memsize() const { return size() * sizeof(value_type); }
// FixedArray::data()
//
// Returns a const T* pointer to elements of the `FixedArray`. This pointer
// can be used to access (but not modify) the contained elements.
const_pointer data() const { return AsValueType(storage_.begin()); }
// Overload of FixedArray::data() to return a T* pointer to elements of the
// fixed array. This pointer can be used to access and modify the contained
// elements.
pointer data() { return AsValueType(storage_.begin()); }
// FixedArray::operator[]
//
// Returns a reference the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
// Returns a reference to the "i"th element.
inline const T& operator[](size_type i) const {
DCHECK_LT(i, size_);
return array_[i].element;
reference operator[](size_type i) {
DCHECK_LT(i, size());
return data()[i];
}
inline iterator begin() { return &array_[0].element; }
inline iterator end() { return &array_[size_].element; }
// Overload of FixedArray::operator()[] to return a const reference to the
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
const_reference operator[](size_type i) const {
DCHECK_LT(i, size());
return data()[i];
}
inline const_iterator begin() const { return &array_[0].element; }
inline const_iterator end() const { return &array_[size_].element; }
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
reference front() { return *begin(); }
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
const_reference front() const { return *begin(); }
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
reference back() { return *(end() - 1); }
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
const_reference back() const { return *(end() - 1); }
// FixedArray::begin()
//
// Returns an iterator to the beginning of the fixed array.
iterator begin() { return data(); }
// Overload of FixedArray::begin() to return a const iterator to the
// beginning of the fixed array.
const_iterator begin() const { return data(); }
// FixedArray::cbegin()
//
// Returns a const iterator to the beginning of the fixed array.
const_iterator cbegin() const { return begin(); }
// FixedArray::end()
//
// Returns an iterator to the end of the fixed array.
iterator end() { return data() + size(); }
// Overload of FixedArray::end() to return a const iterator to the end of the
// fixed array.
const_iterator end() const { return data() + size(); }
// FixedArray::cend()
//
// Returns a const iterator to the end of the fixed array.
const_iterator cend() const { return end(); }
// FixedArray::rbegin()
//
// Returns a reverse iterator from the end of the fixed array.
reverse_iterator rbegin() { return reverse_iterator(end()); }
// Overload of FixedArray::rbegin() to return a const reverse iterator from
// the end of the fixed array.
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
// FixedArray::crbegin()
//
// Returns a const reverse iterator from the end of the fixed array.
const_reverse_iterator crbegin() const { return rbegin(); }
// FixedArray::rend()
//
// Returns a reverse iterator from the beginning of the fixed array.
reverse_iterator rend() { return reverse_iterator(begin()); }
// Overload of FixedArray::rend() for returning a const reverse iterator
// from the beginning of the fixed array.
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
// FixedArray::crend()
//
// Returns a reverse iterator from the beginning of the fixed array.
const_reverse_iterator crend() const { return rend(); }
// FixedArray::fill()
//
// Assigns the given `value` to all elements in the fixed array.
void fill(const value_type& val) { std::fill(begin(), end(), val); }
// Relational operators. Equality operators are elementwise using
// `operator==`, while order operators order FixedArrays lexicographically.
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs == rhs);
}
friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
return std::lexicographical_compare(
lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
return rhs < lhs;
}
friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
return !(rhs < lhs);
}
friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs < rhs);
}
private:
// Container to hold elements of type T. This is necessary to handle
// the case where T is a (C-style) array. The size of InnerContainer
// and T must be the same, otherwise callers' assumptions about use
// of this code will be broken.
struct InnerContainer {
T element;
// StorageElement
//
// For FixedArrays with a C-style-array value_type, StorageElement is a POD
// wrapper struct called StorageElementWrapper that holds the value_type
// instance inside. This is needed for construction and destruction of the
// entire array regardless of how many dimensions it has. For all other cases,
// StorageElement is just an alias of value_type.
//
// Maintainer's Note: The simpler solution would be to simply wrap value_type
// in a struct whether it's an array or not. That causes some paranoid
// diagnostics to misfire, believing that 'data()' returns a pointer to a
// single element, rather than the packed array that it really is.
// e.g.:
//
// FixedArray<char> buf(1);
// sprintf(buf.data(), "foo");
//
// error: call to int __builtin___sprintf_chk(etc...)
// will always overflow destination buffer [-Werror]
//
template <typename OuterT,
typename InnerT = typename std::remove_extent<OuterT>::type,
size_t InnerN = std::extent<OuterT>::value>
struct StorageElementWrapper {
InnerT array[InnerN];
};
// How many elements should we store inline?
// a. If not specified, use a default of 256 bytes (256 bytes
// seems small enough to not cause stack overflow or unnecessary
// stack pollution, while still allowing stack allocation for
// reasonably long character arrays.)
// b. Never use 0 length arrays (not ISO C++)
static const size_type S1 = ((inline_elements < 0)
? (256/sizeof(T)) : inline_elements);
static const size_type S2 = (S1 <= 0) ? 1 : S1;
static const size_type kInlineElements = S2;
using StorageElement =
typename std::conditional<std::is_array<value_type>::value,
StorageElementWrapper<value_type>,
value_type>::type;
size_type const size_;
InnerContainer* const array_;
static pointer AsValueType(pointer ptr) { return ptr; }
static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
return std::addressof(ptr->array);
}
// Allocate some space, not an array of elements of type T, so that we can
// skip calling the T constructors and destructors for space we never use.
ManualConstructor<InnerContainer> inline_space_[kInlineElements];
static_assert(sizeof(StorageElement) == sizeof(value_type), "");
static_assert(alignof(StorageElement) == alignof(value_type), "");
class NonEmptyInlinedStorage {
public:
StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
void AnnotateConstruct(size_type) {}
void AnnotateDestruct(size_type) {}
// #ifdef ADDRESS_SANITIZER
// void* RedzoneBegin() { return &redzone_begin_; }
// void* RedzoneEnd() { return &redzone_end_ + 1; }
// #endif // ADDRESS_SANITIZER
private:
// ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
// ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
class EmptyInlinedStorage {
public:
StorageElement* data() { return nullptr; }
void AnnotateConstruct(size_type) {}
void AnnotateDestruct(size_type) {}
};
using InlinedStorage =
typename std::conditional<inline_elements == 0,
EmptyInlinedStorage,
NonEmptyInlinedStorage>::type;
// Storage
//
// An instance of Storage manages the inline and out-of-line memory for
// instances of FixedArray. This guarantees that even when construction of
// individual elements fails in the FixedArray constructor body, the
// destructor for Storage will still be called and out-of-line memory will be
// properly deallocated.
//
class Storage : public InlinedStorage {
public:
Storage(size_type n, const allocator_type& a)
: size_alloc_(n, a), data_(InitializeData()) {}
~Storage() noexcept {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateDestruct(size());
} else {
AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
}
}
size_type size() const { return std::get<0>(size_alloc_); }
StorageElement* begin() const { return data_; }
StorageElement* end() const { return begin() + size(); }
allocator_type& alloc() { return std::get<1>(size_alloc_); }
private:
static bool UsingInlinedStorage(size_type n) {
return n <= inline_elements;
}
StorageElement* InitializeData() {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateConstruct(size());
return InlinedStorage::data();
} else {
return reinterpret_cast<StorageElement*>(
AllocatorTraits::allocate(alloc(), size()));
}
}
// Using std::tuple and not absl::CompressedTuple, as it has a lot of
// dependencies to other absl headers.
std::tuple<size_type, allocator_type> size_alloc_;
StorageElement* data_;
};
Storage storage_;
};
// Implementation details follow
// Out-of-class definition of the static constexpr member; required for
// ODR-uses before C++17 made static constexpr data members implicitly inline.
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
// NOTE(review): Out-of-line constructor belonging to the pre-rewrite
// FixedArray<T, inline_elements> implementation -- it references
// kInlineElements / inline_space_ / InnerContainer, which the allocator-based
// FixedArray above does not declare. Looks like leftover residue of the old
// version; confirm it should be deleted.
template <class T, ssize_t S>
inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n)
    : size_(n),
      // Small arrays reuse the preallocated inline buffer; larger ones go to
      // the heap.
      array_((n <= kInlineElements
                  ? reinterpret_cast<InnerContainer*>(inline_space_)
                  : new InnerContainer[n])) {
  // Construct only the elements actually used.
  if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
    for (size_t i = 0; i != size_; ++i) {
      inline_space_[i].Init();
    }
  }
}
// NOTE(review): Out-of-line destructor of the pre-rewrite
// FixedArray<T, inline_elements> implementation; pairs with the legacy
// constructor that chooses between inline_space_ and heap storage. Appears
// to be leftover diff residue -- confirm removal.
template <class T, ssize_t S>
inline FixedArray<T, S>::~FixedArray() {
  // Heap storage is released with delete[]; inline storage needs explicit
  // per-element destruction because elements were manually constructed.
  if (array_ != reinterpret_cast<InnerContainer*>(inline_space_)) {
    delete[] array_;
  } else {
    for (size_t i = 0; i != size_; ++i) {
      inline_space_[i].Destroy();
    }
  }
}
// Out-of-class definition of the static constexpr member; required for
// ODR-uses before C++17 made static constexpr data members implicitly inline.
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
    FixedArray<T, N, A>::inline_elements;
} // namespace internal
} // namespace ceres

View File

@@ -28,8 +28,8 @@
//
// Author: vitus@google.com (Michael Vitus)
#ifndef CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
#define CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
#ifndef CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_
#define CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_
#include "Eigen/Core"
#include "glog/logging.h"
@@ -42,12 +42,15 @@ namespace internal {
// vector as pivot instead of first. This computes the vector v with v(n) = 1
// and beta such that H = I - beta * v * v^T is orthogonal and
// H * x = ||x||_2 * e_n.
template <typename Scalar>
void ComputeHouseholderVector(const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& x,
Eigen::Matrix<Scalar, Eigen::Dynamic, 1>* v,
//
// NOTE: Some versions of MSVC have trouble deducing the type of v if
// you do not specify all the template arguments explicitly.
template <typename XVectorType, typename Scalar, int N>
void ComputeHouseholderVector(const XVectorType& x,
Eigen::Matrix<Scalar, N, 1>* v,
Scalar* beta) {
CHECK_NOTNULL(beta);
CHECK_NOTNULL(v);
CHECK(beta != nullptr);
CHECK(v != nullptr);
CHECK_GT(x.rows(), 1);
CHECK_EQ(x.rows(), v->rows());
@@ -82,4 +85,4 @@ void ComputeHouseholderVector(const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& x,
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
#endif // CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_

View File

@@ -0,0 +1,165 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jodebo_beck@gmx.de (Johannes Beck)
//
// Algorithms to be used together with integer_sequence, like computing the sum
// or the exclusive scan (sometimes called exclusive prefix sum) at compile
// time.
#ifndef CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_
#define CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_
#include <utility>
namespace ceres {
namespace internal {
// Compile-time summation of a std::integer_sequence.
//
// The partial specializations below consume one, two or four leading values
// per recursive instantiation. Peeling several values at a time keeps the
// number of template instantiations low and therefore speeds up compilation.
//
// Examples:
//  1) integer_sequence<int, 5>        -> Value = 5
//  2) integer_sequence<int, 4, 2>     -> Value = 4 + 2 + 0
//  3) integer_sequence<int, 2, 1, 4>  -> Value = 2 + 1 + (4)
template <typename Seq>
struct SumImpl;

// Termination: the sum of an empty sequence is the additive identity.
template <typename T>
struct SumImpl<std::integer_sequence<T>> {
  static constexpr T Value = T(0);
};

// Termination: a single remaining value is its own sum.
template <typename T, T Head>
struct SumImpl<std::integer_sequence<T, Head>> {
  static constexpr T Value = Head;
};

// Consume one leading value.
template <typename T, T Head, T... Rest>
struct SumImpl<std::integer_sequence<T, Head, Rest...>> {
  static constexpr T Value =
      Head + SumImpl<std::integer_sequence<T, Rest...>>::Value;
};

// Consume two leading values at once.
template <typename T, T N1, T N2, T... Rest>
struct SumImpl<std::integer_sequence<T, N1, N2, Rest...>> {
  static constexpr T Value =
      N1 + N2 + SumImpl<std::integer_sequence<T, Rest...>>::Value;
};

// Consume four leading values at once.
template <typename T, T N1, T N2, T N3, T N4, T... Rest>
struct SumImpl<std::integer_sequence<T, N1, N2, N3, N4, Rest...>> {
  static constexpr T Value =
      N1 + N2 + N3 + N4 + SumImpl<std::integer_sequence<T, Rest...>>::Value;
};
// Public entry point for compile-time summation: Sum<Seq>::Value is the sum
// of all integers in the sequence Seq.
template <typename Seq>
class Sum {
  using ValueType = typename Seq::value_type;

 public:
  static constexpr ValueType Value = SumImpl<Seq>::Value;
};
// Implementation of a compile-time exclusive scan (exclusive prefix sum)
// over an integer sequence. "Exclusive" means the i-th input element is not
// part of the i-th output, so for an input I the result R is:
//
//   R[0] = 0
//   R[1] = I[0]
//   R[2] = I[0] + I[1]
//   R[3] = I[0] + I[1] + I[2]
//   ...
//
// (std::exclusive_scan performs the same operation at run time, but cannot
// be used at compile time; see
// https://en.cppreference.com/w/cpp/algorithm/exclusive_scan.)
//
// The recursion threads three pieces of state: Acc, the running total;
// SeqIn, the values not yet consumed; and SeqOut, the prefix sums already
// produced. E.g. for integer_sequence<int, 1, 4, 3> (seq := integer_sequence):
//
//   T  , Acc, SeqIn              , SeqOut
//   int, 0  , seq<int, 1, 4, 3>  , seq<int>
//   int, 1  , seq<int, 4, 3>     , seq<int, 0>
//   int, 5  , seq<int, 3>        , seq<int, 0, 1>
//   int, 8  , seq<int>           , seq<int, 0, 1, 5>   <- resulting sequence
template <typename T, T Acc, typename SeqIn, typename SeqOut>
struct ExclusiveScanImpl;

// All input consumed ('recursion' ends): SeqOut holds the finished scan.
template <typename T, T Acc, typename SeqOut>
struct ExclusiveScanImpl<T, Acc, std::integer_sequence<T>, SeqOut> {
  using Type = SeqOut;
};

// One step: append the running total to the output, then fold the leading
// input value into the total.
template <typename T, T Acc, T Head, T... Tail, T... Out>
struct ExclusiveScanImpl<T,
                         Acc,
                         std::integer_sequence<T, Head, Tail...>,
                         std::integer_sequence<T, Out...>> {
  using Type = typename ExclusiveScanImpl<
      T,
      Acc + Head,
      std::integer_sequence<T, Tail...>,
      std::integer_sequence<T, Out..., Acc>>::Type;
};
// Computes the exclusive scan of the given integer sequence. The result has
// the same length as the input because the grand total is dropped; e.g. the
// scan of integer_sequence<int, 1, 2, 3> is integer_sequence<int, 0, 1, 3>.
template <typename Seq>
class ExclusiveScanT {
  using ValueType = typename Seq::value_type;

 public:
  using Type = typename ExclusiveScanImpl<ValueType,
                                          ValueType(0),
                                          Seq,
                                          std::integer_sequence<ValueType>>::Type;
};

// Convenience alias so callers can write ExclusiveScan<Seq> without typename.
template <typename Seq>
using ExclusiveScan = typename ExclusiveScanT<Seq>::Type;
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_

View File

@@ -0,0 +1,183 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2020 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jodebo_beck@gmx.de (Johannes Beck)
//
#ifndef CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_
#define CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_
#include "householder_vector.h"
namespace ceres {
// Box-plus operator for the line parameterization. x_ptr holds the line as
// [origin o (kDim entries), direction d (kDim entries)]; delta_ptr holds the
// tangent-space increment [delta_o (kDim-1), delta_d (kDim-1)]. The updated
// line is written to x_plus_delta_ptr. Always returns true.
template <int AmbientSpaceDimension>
bool LineParameterization<AmbientSpaceDimension>::Plus(
    const double* x_ptr,
    const double* delta_ptr,
    double* x_plus_delta_ptr) const {
  // We seek a box plus operator of the form
  //
  //   [o*, d*] = Plus([o, d], [delta_o, delta_d])
  //
  // where o is the origin point, d is the direction vector, delta_o is
  // the delta of the origin point and delta_d the delta of the direction and
  // o* and d* is the updated origin point and direction.
  //
  // We separate the Plus operator into the origin point and directional part
  //   d* = Plus_d(d, delta_d)
  //   o* = Plus_o(o, d, delta_o)
  //
  // The direction update function Plus_d is the same as for the homogeneous
  // vector parameterization:
  //
  //   d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T
  //
  // where H is the householder matrix
  //   H_{v} = I - (2 / |v|^2) v v^T
  // and
  //   v(d) = d - sign(d_n) |d| e_n.
  //
  // The origin point update function Plus_o is defined as
  //
  //   o* = o + H_{v(d)} [0.5 delta_o, 0]^T.
  static constexpr int kDim = AmbientSpaceDimension;
  using AmbientVector = Eigen::Matrix<double, kDim, 1>;
  using AmbientVectorRef = Eigen::Map<Eigen::Matrix<double, kDim, 1>>;
  using ConstAmbientVectorRef =
      Eigen::Map<const Eigen::Matrix<double, kDim, 1>>;
  using ConstTangentVectorRef =
      Eigen::Map<const Eigen::Matrix<double, kDim - 1, 1>>;

  // Map the packed input/output buffers; no copies are made here.
  ConstAmbientVectorRef o(x_ptr);
  ConstAmbientVectorRef d(x_ptr + kDim);

  ConstTangentVectorRef delta_o(delta_ptr);
  ConstTangentVectorRef delta_d(delta_ptr + kDim - 1);
  AmbientVectorRef o_plus_delta(x_plus_delta_ptr);
  AmbientVectorRef d_plus_delta(x_plus_delta_ptr + kDim);

  const double norm_delta_d = delta_d.norm();

  // Start from the unmodified origin; the householder correction for delta_o
  // is added onto this at the bottom of the function.
  o_plus_delta = o;

  // Shortcut for zero delta direction.
  if (norm_delta_d == 0.0) {
    d_plus_delta = d;

    if (delta_o.isZero(0.0)) {
      // Both deltas are zero: nothing further to do.
      return true;
    }
  }

  // Calculate the householder transformation which is needed for f_d and f_o.
  AmbientVector v;
  double beta;

  // NOTE: The explicit template arguments are needed here because
  // ComputeHouseholderVector is templated and some versions of MSVC
  // have trouble deducing the type of v automatically.
  internal::ComputeHouseholderVector<ConstAmbientVectorRef, double, kDim>(
      d, &v, &beta);

  if (norm_delta_d != 0.0) {
    // Map the delta from the minimum representation to the over parameterized
    // homogeneous vector. See section A6.9.2 on page 624 of Hartley & Zisserman
    // (2nd Edition) for a detailed description. Note there is a typo on Page
    // 625, line 4 so check the book errata.
    const double norm_delta_div_2 = 0.5 * norm_delta_d;
    const double sin_delta_by_delta =
        std::sin(norm_delta_div_2) / norm_delta_div_2;

    // Apply the delta update to remain on the unit sphere. See section A6.9.3
    // on page 625 of Hartley & Zisserman (2nd Edition) for a detailed
    // description.
    AmbientVector y;
    y.template head<kDim - 1>() = 0.5 * sin_delta_by_delta * delta_d;
    y[kDim - 1] = std::cos(norm_delta_div_2);

    // Rescale by |d| so the direction keeps its original magnitude.
    d_plus_delta = d.norm() * (y - v * (beta * (v.transpose() * y)));
  }

  // The null space is in the direction of the line, so the tangent space is
  // perpendicular to the line direction. This is achieved by using the
  // householder matrix of the direction and allow only movements
  // perpendicular to e_n.
  //
  // The factor of 0.5 is used to be consistent with the line direction
  // update.
  AmbientVector y;
  y << 0.5 * delta_o, 0;
  o_plus_delta += y - v * (beta * (v.transpose() * y));
  return true;
}
// Writes the (2*kDim) x (2*(kDim-1)) row-major Jacobian of Plus with respect
// to the tangent-space delta, evaluated at delta = 0, into jacobian_ptr.
// Always returns true.
template <int AmbientSpaceDimension>
bool LineParameterization<AmbientSpaceDimension>::ComputeJacobian(
    const double* x_ptr, double* jacobian_ptr) const {
  static constexpr int kDim = AmbientSpaceDimension;
  using AmbientVector = Eigen::Matrix<double, kDim, 1>;
  using ConstAmbientVectorRef =
      Eigen::Map<const Eigen::Matrix<double, kDim, 1>>;
  using MatrixRef = Eigen::Map<
      Eigen::Matrix<double, 2 * kDim, 2 * (kDim - 1), Eigen::RowMajor>>;

  // Only the direction half of x is read; the Jacobian below is built from
  // the householder transform of d alone.
  ConstAmbientVectorRef d(x_ptr + kDim);
  MatrixRef jacobian(jacobian_ptr);

  // Clear the Jacobian as only half of the matrix is not zero.
  jacobian.setZero();

  AmbientVector v;
  double beta;

  // NOTE: The explicit template arguments are needed here because
  // ComputeHouseholderVector is templated and some versions of MSVC
  // have trouble deducing the type of v automatically.
  internal::ComputeHouseholderVector<ConstAmbientVectorRef, double, kDim>(
      d, &v, &beta);

  // The Jacobian is equal to J = 0.5 * H.leftCols(kDim - 1) where H is
  // the Householder matrix (H = I - beta * v * v') for the origin point. For
  // the line direction part the Jacobian is scaled by the norm of the
  // direction.
  for (int i = 0; i < kDim - 1; ++i) {
    // Column i of -0.5 * beta * v * v^T, plus 0.5 on the diagonal (the 0.5*I
    // term of 0.5 * H).
    jacobian.block(0, i, kDim, 1) = -0.5 * beta * v(i) * v;
    jacobian.col(i)(i) += 0.5;
  }

  jacobian.template block<kDim, kDim - 1>(kDim, kDim - 1) =
      jacobian.template block<kDim, kDim - 1>(0, 0) * d.norm();
  return true;
}
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_

View File

@@ -1,170 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Various Google-specific macros.
//
// This code is compiled directly on many platforms, including client
// platforms like Windows, Mac, and embedded systems. Before making
// any changes here, make sure that you're not breaking any platforms.
#ifndef CERES_PUBLIC_INTERNAL_MACROS_H_
#define CERES_PUBLIC_INTERNAL_MACROS_H_
#include <cstddef> // For size_t.
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
//
// For disallowing only assign or copy, write the code directly, but declare
// the intend in a comment, for example:
//
// void operator=(const TypeName&); // _DISALLOW_ASSIGN
// Note, that most uses of CERES_DISALLOW_ASSIGN and CERES_DISALLOW_COPY
// are broken semantically, one should either use disallow both or
// neither. Try to avoid these in new code.
#define CERES_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
CERES_DISALLOW_COPY_AND_ASSIGN(TypeName)
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//
// One caveat is that arraysize() doesn't accept any array of an
// anonymous type or a type defined inside a function. In these rare
// cases, you have to use the unsafe ARRAYSIZE() macro below. This is
// due to a limitation in C++'s template system. The limitation might
// eventually be removed, but it hasn't happened yet.
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef _WIN32
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
// ARRAYSIZE performs essentially the same calculation as arraysize,
// but can be used on anonymous types or types defined inside
// functions. It's less safe than arraysize as it accepts some
// (although not all) pointers. Therefore, you should use arraysize
// whenever possible.
//
// The expression ARRAYSIZE(a) is a compile-time constant of type
// size_t.
//
// ARRAYSIZE catches a few type errors. If you see a compiler error
//
// "warning: division by zero in ..."
//
// when using ARRAYSIZE, you are (wrongfully) giving it a pointer.
// You should only use ARRAYSIZE on statically allocated arrays.
//
// The following comments are on the implementation details, and can
// be ignored by the users.
//
// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
// the array) and sizeof(*(arr)) (the # of bytes in one array
// element). If the former is divisible by the latter, perhaps arr is
// indeed an array, in which case the division result is the # of
// elements in the array. Otherwise, arr cannot possibly be an array,
// and we generate a compiler error to prevent the code from
// compiling.
//
// Since the size of bool is implementation-defined, we need to cast
// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
// result has type size_t.
//
// This macro is not perfect as it wrongfully accepts certain
// pointers, namely where the pointer size is divisible by the pointee
// size. Since all our code has to go through a 32-bit compiler,
// where a pointer is 4 bytes, this means all pointers to a type whose
// size is 3 or greater than 4 will be (righteously) rejected.
//
// Kudos to Jorg Brown for this simple and elegant implementation.
//
// - wan 2005-11-16
//
// Starting with Visual C++ 2005, WinNT.h includes ARRAYSIZE. However,
// the definition comes from the over-broad windows.h header that
// introduces a macro, ERROR, that conflicts with the logging framework
// that Ceres uses. Instead, rename ARRAYSIZE to CERES_ARRAYSIZE.
#define CERES_ARRAYSIZE(a) \
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
// Tell the compiler to warn about unused return values for functions
// declared with this macro. The macro should be used on function
// declarations following the argument list:
//
// Sprocket* AllocateSprocket() MUST_USE_RESULT;
//
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
&& !defined(COMPILER_ICC)
#define CERES_MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define CERES_MUST_USE_RESULT
#endif
// Platform independent macros to get aligned memory allocations.
// For example
//
// MyFoo my_foo CERES_ALIGN_ATTRIBUTE(16);
//
// Gives us an instance of MyFoo which is aligned at a 16 byte
// boundary.
#if defined(_MSC_VER)
#define CERES_ALIGN_ATTRIBUTE(n) __declspec(align(n))
#define CERES_ALIGN_OF(T) __alignof(T)
#elif defined(__GNUC__)
#define CERES_ALIGN_ATTRIBUTE(n) __attribute__((aligned(n)))
#define CERES_ALIGN_OF(T) __alignof(T)
#endif
#endif // CERES_PUBLIC_INTERNAL_MACROS_H_

View File

@@ -1,208 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: kenton@google.com (Kenton Varda)
//
// ManualConstructor statically-allocates space in which to store some
// object, but does not initialize it. You can then call the constructor
// and destructor for the object yourself as you see fit. This is useful
// for memory management optimizations, where you want to initialize and
// destroy an object multiple times but only allocate it once.
//
// (When I say ManualConstructor statically allocates space, I mean that
// the ManualConstructor object itself is forced to be the right size.)
#ifndef CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
#define CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
#include <new>
namespace ceres {
namespace internal {
// ------- Define CERES_ALIGNED_CHAR_ARRAY --------------------------------
#ifndef CERES_ALIGNED_CHAR_ARRAY
// Because MSVC and older GCCs require that the argument to their alignment
// construct to be a literal constant integer, we use a template instantiated
// at all the possible powers of two.
template<int alignment, int size> struct AlignType { };
template<int size> struct AlignType<0, size> { typedef char result[size]; };
#if !defined(CERES_ALIGN_ATTRIBUTE)
#define CERES_ALIGNED_CHAR_ARRAY you_must_define_CERES_ALIGNED_CHAR_ARRAY_for_your_compiler
#else // !defined(CERES_ALIGN_ATTRIBUTE)
#define CERES_ALIGN_TYPE_TEMPLATE(X) \
template<int size> struct AlignType<X, size> { \
typedef CERES_ALIGN_ATTRIBUTE(X) char result[size]; \
}
CERES_ALIGN_TYPE_TEMPLATE(1);
CERES_ALIGN_TYPE_TEMPLATE(2);
CERES_ALIGN_TYPE_TEMPLATE(4);
CERES_ALIGN_TYPE_TEMPLATE(8);
CERES_ALIGN_TYPE_TEMPLATE(16);
CERES_ALIGN_TYPE_TEMPLATE(32);
CERES_ALIGN_TYPE_TEMPLATE(64);
CERES_ALIGN_TYPE_TEMPLATE(128);
CERES_ALIGN_TYPE_TEMPLATE(256);
CERES_ALIGN_TYPE_TEMPLATE(512);
CERES_ALIGN_TYPE_TEMPLATE(1024);
CERES_ALIGN_TYPE_TEMPLATE(2048);
CERES_ALIGN_TYPE_TEMPLATE(4096);
CERES_ALIGN_TYPE_TEMPLATE(8192);
// Any larger and MSVC++ will complain.
#undef CERES_ALIGN_TYPE_TEMPLATE
#define CERES_ALIGNED_CHAR_ARRAY(T, Size) \
typename AlignType<CERES_ALIGN_OF(T), sizeof(T) * Size>::result
#endif // !defined(CERES_ALIGN_ATTRIBUTE)
#endif // CERES_ALIGNED_CHAR_ARRAY
// Statically allocates correctly-sized and -aligned raw storage for exactly
// one `Type` and lets the user run the constructor and destructor explicitly
// via Init() / Destroy(). Useful inside unions and anywhere an object must be
// (re)constructed repeatedly over a single allocation.
template <typename Type>
class ManualConstructor {
 public:
  // No constructor or destructor because one of the most useful uses of
  // this class is as part of a union, and members of a union cannot have
  // constructors or destructors. And, anyway, the whole point of this
  // class is to bypass these.

  // Typed access to the storage. Before Init() / after Destroy() this points
  // at raw (unconstructed) memory, so only use the result after Init().
  inline Type* get() {
    return reinterpret_cast<Type*>(space_);
  }
  inline const Type* get() const {
    return reinterpret_cast<const Type*>(space_);
  }

  inline Type* operator->() { return get(); }
  inline const Type* operator->() const { return get(); }

  inline Type& operator*() { return *get(); }
  inline const Type& operator*() const { return *get(); }

  // This is needed to get around the strict aliasing warning GCC generates.
  inline void* space() {
    return reinterpret_cast<void*>(space_);
  }

  // You can pass up to eleven constructor arguments as arguments of Init().
  //
  // NOTE: the zero-argument Init() uses `new (p) Type;`, i.e. it
  // default-initializes, so trivial types are left uninitialized rather than
  // value-initialized.
  inline void Init() {
    new(space()) Type;
  }

  template <typename T1>
  inline void Init(const T1& p1) {
    new(space()) Type(p1);
  }

  template <typename T1, typename T2>
  inline void Init(const T1& p1, const T2& p2) {
    new(space()) Type(p1, p2);
  }

  template <typename T1, typename T2, typename T3>
  inline void Init(const T1& p1, const T2& p2, const T3& p3) {
    new(space()) Type(p1, p2, p3);
  }

  template <typename T1, typename T2, typename T3, typename T4>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
    new(space()) Type(p1, p2, p3, p4);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5) {
    new(space()) Type(p1, p2, p3, p4, p5);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6) {
    new(space()) Type(p1, p2, p3, p4, p5, p6);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7) {
    new(space()) Type(p1, p2, p3, p4, p5, p6, p7);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
    new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9) {
    new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9, typename T10>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9, const T10& p10) {
    new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
  }

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6, typename T7, typename T8, typename T9, typename T10,
            typename T11>
  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
                   const T5& p5, const T6& p6, const T7& p7, const T8& p8,
                   const T9& p9, const T10& p10, const T11& p11) {
    new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
  }

  // Runs ~Type() on the stored object. Only valid after a matching Init().
  inline void Destroy() {
    get()->~Type();
  }

 private:
  // Raw storage sized and aligned for one Type (see CERES_ALIGNED_CHAR_ARRAY
  // defined above in this header).
  CERES_ALIGNED_CHAR_ARRAY(Type, 1) space_;
};
#undef CERES_ALIGNED_CHAR_ARRAY
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_

View File

@@ -0,0 +1,90 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: memory.h
// -----------------------------------------------------------------------------
//
// This header file contains utility functions for managing the creation and
// conversion of smart pointers. This file is an extension to the C++
// standard <memory> library header file.
#ifndef CERES_PUBLIC_INTERNAL_MEMORY_H_
#define CERES_PUBLIC_INTERNAL_MEMORY_H_
#include <memory>
#ifdef CERES_HAVE_EXCEPTIONS
#define CERES_INTERNAL_TRY try
#define CERES_INTERNAL_CATCH_ANY catch (...)
#define CERES_INTERNAL_RETHROW \
do { \
throw; \
} while (false)
#else // CERES_HAVE_EXCEPTIONS
#define CERES_INTERNAL_TRY if (true)
#define CERES_INTERNAL_CATCH_ANY else if (false)
#define CERES_INTERNAL_RETHROW \
do { \
} while (false)
#endif // CERES_HAVE_EXCEPTIONS
namespace ceres {
namespace internal {
// Constructs an object in every slot of the uninitialized range
// [first, last), forwarding the same args... to each constructor via the
// given allocator.
//
// Exception-safe: if any construction throws, every element constructed so
// far is destroyed (in reverse order) before the exception is rethrown, so
// the range is left fully uninitialized again. When Ceres is built without
// exceptions, the CERES_INTERNAL_* macros above compile the rollback path
// away.
template <typename Allocator, typename Iterator, typename... Args>
void ConstructRange(Allocator& alloc,
                    Iterator first,
                    Iterator last,
                    const Args&... args) {
  for (Iterator cur = first; cur != last; ++cur) {
    CERES_INTERNAL_TRY {
      std::allocator_traits<Allocator>::construct(
          alloc, std::addressof(*cur), args...);
    }
    CERES_INTERNAL_CATCH_ANY {
      // Unwind: destroy everything built before the failing element.
      while (cur != first) {
        --cur;
        std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
      }
      CERES_INTERNAL_RETHROW;
    }
  }
}
// Copy-constructs the elements of [first, last) into the uninitialized range
// starting at destination, via the given allocator.
//
// Mirrors ConstructRange's exception safety: if a copy throws, every element
// already constructed at the destination is destroyed (in reverse order)
// before rethrowing. The static_cast<void> on each increment suppresses any
// user-overloaded operator, (comma) between the two expressions.
template <typename Allocator, typename Iterator, typename InputIterator>
void CopyRange(Allocator& alloc,
               Iterator destination,
               InputIterator first,
               InputIterator last) {
  for (Iterator cur = destination; first != last;
       static_cast<void>(++cur), static_cast<void>(++first)) {
    CERES_INTERNAL_TRY {
      std::allocator_traits<Allocator>::construct(
          alloc, std::addressof(*cur), *first);
    }
    CERES_INTERNAL_CATCH_ANY {
      // Unwind: destroy the destination elements constructed so far.
      while (cur != destination) {
        --cur;
        std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
      }
      CERES_INTERNAL_RETHROW;
    }
  }
}
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_MEMORY_H_

View File

@@ -36,12 +36,12 @@
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#include <cstring>
#include <utility>
#include "Eigen/Dense"
#include "Eigen/StdVector"
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
@@ -51,42 +51,11 @@
namespace ceres {
namespace internal {
// Helper templates that allow evaluation of a variadic functor or a
// CostFunction object.
template <typename CostFunctor,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const void* /* NOT USED */) {
return VariadicEvaluate<CostFunctor,
double,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
*functor,
parameters,
residuals);
}
template <typename CostFunctor,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const CostFunction* /* NOT USED */) {
return functor->Evaluate(parameters, residuals, NULL);
}
// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctor,
NumericDiffMethodType kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
int kParameterBlock,
template <typename CostFunctor, NumericDiffMethodType kMethod,
int kNumResiduals, typename ParameterDims, int kParameterBlock,
int kParameterBlockSize>
struct NumericDiff {
// Mutates parameters but must restore them before return.
@@ -104,6 +73,8 @@ struct NumericDiff {
using Eigen::RowMajor;
using Eigen::ColMajor;
DCHECK(jacobian);
const int num_residuals_internal =
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
const int parameter_block_index_internal =
@@ -155,7 +126,7 @@ struct NumericDiff {
// compute the derivative for that parameter.
FixedArray<double> temp_residual_array(num_residuals_internal);
FixedArray<double> residual_array(num_residuals_internal);
Map<ResidualVector> residuals(residual_array.get(),
Map<ResidualVector> residuals(residual_array.data(),
num_residuals_internal);
for (int j = 0; j < parameter_block_size_internal; ++j) {
@@ -170,8 +141,8 @@ struct NumericDiff {
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
temp_residual_array.get(),
residual_array.get())) {
temp_residual_array.data(),
residual_array.data())) {
return false;
}
} else {
@@ -182,8 +153,8 @@ struct NumericDiff {
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
temp_residual_array.get(),
residual_array.get())) {
temp_residual_array.data(),
residual_array.data())) {
return false;
}
}
@@ -220,8 +191,9 @@ struct NumericDiff {
// Mutate 1 element at a time and then restore.
x_plus_delta(parameter_index) = x(parameter_index) + delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, residuals.data(), functor)) {
if (!VariadicEvaluate<ParameterDims>(*functor,
parameters,
residuals.data())) {
return false;
}
@@ -234,8 +206,9 @@ struct NumericDiff {
// Compute the function on the other side of x(parameter_index).
x_plus_delta(parameter_index) = x(parameter_index) - delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, temp_residuals.data(), functor)) {
if (!VariadicEvaluate<ParameterDims>(*functor,
parameters,
temp_residuals.data())) {
return false;
}
@@ -407,35 +380,116 @@ struct NumericDiff {
}
};
template <typename CostFunctor,
NumericDiffMethodType kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
int kParameterBlock>
struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
kParameterBlock, 0> {
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctor* functor,
const double* residuals_at_eval_point,
const NumericDiffOptions& options,
const int num_residuals,
const int parameter_block_index,
const int parameter_block_size,
double **parameters,
double *jacobian) {
// Silence unused parameter compiler warnings.
(void)functor;
(void)residuals_at_eval_point;
(void)options;
(void)num_residuals;
(void)parameter_block_index;
(void)parameter_block_size;
(void)parameters;
(void)jacobian;
LOG(FATAL) << "Control should never reach here.";
// This function calls NumericDiff<...>::EvaluateJacobianForParameterBlock for
// each parameter block.
//
// Example:
// A call to
// EvaluateJacobianForParameterBlocks<StaticParameterDims<2, 3>>(
// functor,
// residuals_at_eval_point,
// options,
// num_residuals,
// parameters,
// jacobians);
// will result in the following calls to
// NumericDiff<...>::EvaluateJacobianForParameterBlock:
//
// if (jacobians[0] != nullptr) {
// if (!NumericDiff<
// CostFunctor,
// method,
// kNumResiduals,
// StaticParameterDims<2, 3>,
// 0,
// 2>::EvaluateJacobianForParameterBlock(functor,
// residuals_at_eval_point,
// options,
// num_residuals,
// 0,
// 2,
// parameters,
// jacobians[0])) {
// return false;
// }
// }
// if (jacobians[1] != nullptr) {
// if (!NumericDiff<
// CostFunctor,
// method,
// kNumResiduals,
// StaticParameterDims<2, 3>,
// 1,
// 3>::EvaluateJacobianForParameterBlock(functor,
// residuals_at_eval_point,
// options,
// num_residuals,
// 1,
// 3,
// parameters,
// jacobians[1])) {
// return false;
// }
// }
template <typename ParameterDims,
typename Parameters = typename ParameterDims::Parameters,
int ParameterIdx = 0>
struct EvaluateJacobianForParameterBlocks;
// Recursive case: process the parameter block with index ParameterIdx (whose
// compile-time size is N), then recurse over the remaining block sizes Ns...
// with ParameterIdx + 1.
template <typename ParameterDims, int N, int... Ns, int ParameterIdx>
struct EvaluateJacobianForParameterBlocks<ParameterDims,
                                          std::integer_sequence<int, N, Ns...>,
                                          ParameterIdx> {
  template <NumericDiffMethodType method,
            int kNumResiduals,
            typename CostFunctor>
  static bool Apply(const CostFunctor* functor,
                    const double* residuals_at_eval_point,
                    const NumericDiffOptions& options,
                    int num_residuals,
                    double** parameters,
                    double** jacobians) {
    // A null jacobian slot means the caller does not want the derivative for
    // this block, so skip the numeric differentiation for it entirely.
    if (jacobians[ParameterIdx] != nullptr) {
      if (!NumericDiff<
              CostFunctor,
              method,
              kNumResiduals,
              ParameterDims,
              ParameterIdx,
              N>::EvaluateJacobianForParameterBlock(functor,
                                                    residuals_at_eval_point,
                                                    options,
                                                    num_residuals,
                                                    ParameterIdx,
                                                    N,
                                                    parameters,
                                                    jacobians[ParameterIdx])) {
        return false;
      }
    }
    // Compile-time 'recursion' on the tail of the size list. The `template`
    // keyword is required because Apply is a dependent member template.
    return EvaluateJacobianForParameterBlocks<ParameterDims,
                                              std::integer_sequence<int, Ns...>,
                                              ParameterIdx + 1>::
        template Apply<method, kNumResiduals>(functor,
                                              residuals_at_eval_point,
                                              options,
                                              num_residuals,
                                              parameters,
                                              jacobians);
  }
};
// End of the 'recursion': the size list is exhausted, every parameter block
// has been handled, so report success.
template <typename ParameterDims, int ParameterIdx>
struct EvaluateJacobianForParameterBlocks<ParameterDims,
                                          std::integer_sequence<int>,
                                          ParameterIdx> {
  template <NumericDiffMethodType method, int kNumResiduals,
            typename CostFunctor>
  static bool Apply(const CostFunctor* /* NOT USED*/,
                    const double* /* NOT USED*/,
                    const NumericDiffOptions& /* NOT USED*/, int /* NOT USED*/,
                    double** /* NOT USED*/, double** /* NOT USED*/) {
    return true;
  }
};

View File

@@ -0,0 +1,124 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jodebo_beck@gmx.de (Johannes Beck)
#ifndef CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_
#define CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_
#include <array>
#include <utility>
#include "ceres/internal/integer_sequence_algorithm.h"
namespace ceres {
namespace internal {
// Checks, whether the given parameter block sizes are valid. Valid means every
// dimension is bigger than zero.
// Recursion terminator: an empty list of dimensions is trivially valid.
constexpr bool IsValidParameterDimensionSequence(std::integer_sequence<int>) {
  return true;
}

// A non-empty list is valid iff its first dimension is strictly positive and
// the remaining dimensions form a valid list themselves.
template <int Head, int... Tail>
constexpr bool IsValidParameterDimensionSequence(
    std::integer_sequence<int, Head, Tail...>) {
  return Head > 0 &&
         IsValidParameterDimensionSequence(
             std::integer_sequence<int, Tail...>());
}
// Helper class that represents the parameter dimensions. The parameter
// dimensions are either dynamic or the sizes are known at compile time. It is
// used to pass parameter block dimensions around (e.g. between functions or
// classes).
//
// As an example if one have three parameter blocks with dimensions (2, 4, 1),
// one would use 'StaticParameterDims<2, 4, 1>' which is a synonym for
// 'ParameterDims<false, 2, 4, 1>'.
// For dynamic parameter dims, one would just use 'DynamicParameterDims', which
// is a synonym for 'ParameterDims<true>'.
template <bool IsDynamic, int... Ns>
class ParameterDims {
 public:
  // The parameter block sizes as a compile-time integer sequence.
  using Parameters = std::integer_sequence<int, Ns...>;

  // The parameter dimensions are only valid if all parameter block dimensions
  // are greater than zero.
  static constexpr bool kIsValid =
      IsValidParameterDimensionSequence(Parameters());
  static_assert(kIsValid,
                "Invalid parameter block dimension detected. Each parameter "
                "block dimension must be bigger than zero.");

  static constexpr bool kIsDynamic = IsDynamic;

  static constexpr int kNumParameterBlocks = sizeof...(Ns);
  static_assert(kIsDynamic || kNumParameterBlocks > 0,
                "At least one parameter block must be specified.");

  // Total number of scalar parameters summed over all blocks.
  static constexpr int kNumParameters =
      Sum<std::integer_sequence<int, Ns...>>::Value;

  // Returns the size of the parameter block with index `dim`.
  static constexpr int GetDim(int dim) { return params_[dim]; }

  // If one has all parameters packed into a single array this function unpacks
  // the parameters.
  //
  // Returns one pointer per block; pointer i is `ptr` offset by the sum of
  // the sizes of blocks 0..i-1 (an exclusive prefix scan of Ns...).
  template <typename T>
  static inline std::array<T*, kNumParameterBlocks> GetUnpackedParameters(
      T* ptr) {
    using Offsets = ExclusiveScan<Parameters>;
    return GetUnpackedParameters(ptr, Offsets());
  }

 private:
  // Expands one precomputed offset per block into the pointer array.
  template <typename T, int... Indices>
  static inline std::array<T*, kNumParameterBlocks> GetUnpackedParameters(
      T* ptr, std::integer_sequence<int, Indices...>) {
    return std::array<T*, kNumParameterBlocks>{{ptr + Indices...}};
  }

  // Block sizes; declared here, defined out of class below the class body.
  static constexpr std::array<int, kNumParameterBlocks> params_{Ns...};
};
// Even static constexpr member variables need to be defined (not only
// declared). As the ParameterDims class is templated, this definition must
// be in the header file.
template <bool IsDynamic, int... Ns>
constexpr std::array<int, ParameterDims<IsDynamic, Ns...>::kNumParameterBlocks>
ParameterDims<IsDynamic, Ns...>::params_;
// Using declarations for static and dynamic parameter dims. This makes client
// code easier to read.
template <int... Ns>
using StaticParameterDims = ParameterDims<false, Ns...>;
using DynamicParameterDims = ParameterDims<true>;
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_

View File

@@ -32,46 +32,42 @@
#define CERES_PUBLIC_INTERNAL_PORT_H_
// This file needs to compile as c code.
#ifdef __cplusplus
#include <cstddef>
#include "ceres/internal/config.h"
#if defined(CERES_TR1_MEMORY_HEADER)
#include <tr1/memory>
#if defined(CERES_USE_OPENMP)
# if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
# error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
# endif
#elif defined(CERES_USE_CXX_THREADS)
# if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
# error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
# endif
#elif defined(CERES_NO_THREADS)
# if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
# error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
# endif
#else
#include <memory>
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
#endif
namespace ceres {
#if defined(CERES_TR1_SHARED_PTR)
using std::tr1::shared_ptr;
#else
using std::shared_ptr;
// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was
// compiled without any sparse back-end. Verify that it has not subsequently
// been inconsistently redefined.
#if defined(CERES_NO_SPARSE)
# if !defined(CERES_NO_SUITESPARSE)
# error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
# endif
# if !defined(CERES_NO_CXSPARSE)
# error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
# endif
# if !defined(CERES_NO_ACCELERATE_SPARSE)
# error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
# endif
# if defined(CERES_USE_EIGEN_SPARSE)
# error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
# endif
#endif
// We allocate some Eigen objects on the stack and other places they
// might not be aligned to 16-byte boundaries. If we have C++11, we
// can specify their alignment anyway, and thus can safely enable
// vectorization on those matrices; in C++99, we are out of luck. Figure out
// what case we're in and write macros that do the right thing.
#ifdef CERES_USE_CXX11
namespace port_constants {
static constexpr size_t kMaxAlignBytes =
// Work around a GCC 4.8 bug
// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56019) where
// std::max_align_t is misplaced.
#if defined (__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8
alignof(::max_align_t);
#else
alignof(std::max_align_t);
#endif
} // namespace port_constants
#endif
} // namespace ceres
#endif // __cplusplus
// A macro to signal which functions and classes are exported when
// building a DLL with MSVC.
//

View File

@@ -1,310 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jorg@google.com (Jorg Brown)
//
// This is an implementation designed to match the anticipated future TR2
// implementation of the scoped_ptr class, and its closely-related brethren,
// scoped_array, scoped_ptr_malloc, and make_scoped_ptr.
#ifndef CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
#define CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
#include <assert.h>
#include <stdlib.h>
#include <cstddef>
#include <algorithm>
namespace ceres {
namespace internal {
template <class C> class scoped_ptr;
template <class C, class Free> class scoped_ptr_malloc;
template <class C> class scoped_array;
template <class C>
scoped_ptr<C> make_scoped_ptr(C *);
// A scoped_ptr<T> is like a T*, except that the destructor of
// scoped_ptr<T> automatically deletes the pointer it holds (if
// any). That is, scoped_ptr<T> owns the T object that it points
// to. Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to
// a T object. Also like T*, scoped_ptr<T> is thread-compatible, and
// once you dereference it, you get the threadsafety guarantees of T.
//
// The size of a scoped_ptr is small: sizeof(scoped_ptr<C>) == sizeof(C*)
// Owning smart pointer for a single heap object allocated with new.
// Mirrors the pointer it wraps: sizeof(scoped_ptr<C>) == sizeof(C*).
// The owned object (if any) is deleted when the scoped_ptr goes out of
// scope. Copying is disallowed; ownership moves only via release()/reset().
// Like a raw C*, a scoped_ptr<C> is thread-compatible, and dereferencing
// it yields only the thread-safety guarantees of C itself.
template <class C>
class scoped_ptr {
 public:
  // The type of the owned object.
  typedef C element_type;

  // Takes ownership of |p|, which must have been allocated with new
  // (or be NULL). A scoped_ptr is never left uninitialized.
  explicit scoped_ptr(C* p = NULL) : ptr_(p) { }

  // Deletes the owned object, if any (delete on NULL is a no-op). The
  // enum forces a compile error if C is an incomplete type here.
  ~scoped_ptr() {
    enum { type_must_be_complete = sizeof(C) };
    delete ptr_;
  }

  // Destroys the currently owned object (if any) and takes ownership
  // of |p|. Self-reset, i.e. this->reset(this->get()), is a no-op.
  void reset(C* p = NULL) {
    if (p != ptr_) {
      enum { type_must_be_complete = sizeof(C) };
      delete ptr_;
      ptr_ = p;
    }
  }

  // Dereference operators; assert() on a NULL pointer.
  C& operator*() const {
    assert(ptr_ != NULL);
    return *ptr_;
  }
  C* operator->() const {
    assert(ptr_ != NULL);
    return ptr_;
  }

  // Returns the owned pointer without giving up ownership.
  C* get() const { return ptr_; }

  // Pointer-identity comparison against a raw pointer: true only when
  // both refer to the very same object, not merely equal objects.
  bool operator==(const C* p) const { return ptr_ == p; }
  bool operator!=(const C* p) const { return ptr_ != p; }

  // Exchanges the owned pointers of two scoped_ptrs.
  void swap(scoped_ptr& other) {
    C* own = ptr_;
    ptr_ = other.ptr_;
    other.ptr_ = own;
  }

  // Relinquishes ownership: returns the held pointer (possibly NULL)
  // and leaves this scoped_ptr holding NULL. The caller becomes
  // responsible for deleting the returned object.
  C* release() {
    C* released = ptr_;
    ptr_ = NULL;
    return released;
  }

 private:
  C* ptr_;

  // make_scoped_ptr needs access to the (never actually invoked) copy
  // constructor below in order to return by value; see its definition.
  friend scoped_ptr<C> make_scoped_ptr<C>(C *p);

  // Comparing two scoped_ptrs is forbidden: with different element types
  // it is meaningless, and with the same type it would imply double
  // ownership of one object.
  template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;
  template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;

  // Copying is disallowed.
  scoped_ptr(const scoped_ptr&);
  void operator=(const scoped_ptr&);
};
// Free functions
template <class C>
inline void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) {
p1.swap(p2);
}
template <class C>
inline bool operator==(const C* p1, const scoped_ptr<C>& p2) {
return p1 == p2.get();
}
template <class C>
inline bool operator==(const C* p1, const scoped_ptr<const C>& p2) {
return p1 == p2.get();
}
template <class C>
inline bool operator!=(const C* p1, const scoped_ptr<C>& p2) {
return p1 != p2.get();
}
template <class C>
inline bool operator!=(const C* p1, const scoped_ptr<const C>& p2) {
return p1 != p2.get();
}
// Wraps |p| in a scoped_ptr of the matching element type, so callers do
// not have to spell out the name of C for a scoped_ptr that is used
// anonymously/temporarily. The returned value is nominally
// copy-constructed — which is why scoped_ptr grants this function friend
// access to its private copy constructor — but that copy is guaranteed
// to be elided, so the temporary's destructor never runs and the object
// owned by |p| is never deleted here.
template <class C>
scoped_ptr<C> make_scoped_ptr(C *p) {
  return scoped_ptr<C>(p);
}
// scoped_array<C> is the array counterpart of scoped_ptr<C>: the wrapped
// pointer must come from new [] and is released with delete [].
//
// A scoped_array either owns an array or holds NULL. It is
// thread-compatible; elements obtained through operator[] carry only the
// thread-safety guarantees of C itself.
//
// Size: sizeof(scoped_array<C>) == sizeof(C*)
template <class C>
class scoped_array {
 public:
  // The element type of the owned array.
  typedef C element_type;

  // Takes ownership of |p|, which must have been allocated with new []
  // (or be NULL). A scoped_array is never left uninitialized.
  explicit scoped_array(C* p = NULL) : array_(p) { }

  // Deletes the owned array, if any (delete[] on NULL is a no-op); the
  // enum forces a compile error if C is an incomplete type here.
  ~scoped_array() {
    enum { type_must_be_complete = sizeof(C) };
    delete[] array_;
  }

  // Destroys the currently owned array (if any) and takes ownership of
  // |p|. Self-reset, i.e. this->reset(this->get()), is a no-op.
  void reset(C* p = NULL) {
    if (p != array_) {
      enum { type_must_be_complete = sizeof(C) };
      delete[] array_;
      array_ = p;
    }
  }

  // Element access; assert()s that an array is owned and that index i is
  // non-negative.
  C& operator[](std::ptrdiff_t i) const {
    assert(i >= 0);
    assert(array_ != NULL);
    return array_[i];
  }

  // Returns a pointer to the zeroth element, or NULL when no array is
  // owned.
  C* get() const {
    return array_;
  }

  // Pointer-identity comparison against a raw pointer: true only when
  // both refer to the very same array, not two equal arrays.
  bool operator==(const C* p) const { return array_ == p; }
  bool operator!=(const C* p) const { return array_ != p; }

  // Exchanges the owned arrays of two scoped_arrays.
  void swap(scoped_array& other) {
    C* own = array_;
    array_ = other.array_;
    other.array_ = own;
  }

  // Relinquishes ownership: returns the held pointer (possibly NULL) and
  // leaves this scoped_array holding NULL; the caller becomes
  // responsible for delete[]-ing the returned array.
  C* release() {
    C* released = array_;
    array_ = NULL;
    return released;
  }

 private:
  C* array_;

  // Comparing two scoped_arrays is forbidden for the same reasons as
  // comparing two scoped_ptrs.
  template <class C2> bool operator==(scoped_array<C2> const& p2) const;
  template <class C2> bool operator!=(scoped_array<C2> const& p2) const;

  // Copying is disallowed.
  scoped_array(const scoped_array&);
  void operator=(const scoped_array&);
};
// Free functions
template <class C>
inline void swap(scoped_array<C>& p1, scoped_array<C>& p2) {
p1.swap(p2);
}
template <class C>
inline bool operator==(const C* p1, const scoped_array<C>& p2) {
return p1 == p2.get();
}
template <class C>
inline bool operator==(const C* p1, const scoped_array<const C>& p2) {
return p1 == p2.get();
}
template <class C>
inline bool operator!=(const C* p1, const scoped_array<C>& p2) {
return p1 != p2.get();
}
template <class C>
inline bool operator!=(const C* p1, const scoped_array<const C>& p2) {
return p1 != p2.get();
}
// Functor adapting the C library's free() so it can be supplied as the
// Free template argument of scoped_ptr_malloc (declared above).
class ScopedPtrMallocFree {
 public:
  // Releases |x|, which must have been obtained from malloc/calloc/realloc
  // (or be NULL, in which case free() is a no-op).
  inline void operator()(void* x) const {
    free(x);
  }
};
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_

View File

@@ -28,165 +28,77 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
// jodebo_beck@gmx.de (Johannes Beck)
#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#include <stddef.h>
#include "ceres/jet.h"
#include "ceres/types.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "glog/logging.h"
#include <type_traits>
#include <utility>
#include "ceres/cost_function.h"
#include "ceres/internal/parameter_dims.h"
namespace ceres {
namespace internal {
// This block of quasi-repeated code calls the user-supplied functor, which may
// take a variable number of arguments. This is accomplished by specializing the
// struct based on the size of the trailing parameters; parameters with 0 size
// are assumed missing.
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9>
struct VariadicEvaluate {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
input[8],
input[9],
output);
}
};
// For fixed size cost functors
template <typename Functor, typename T, int... Indices>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, std::false_type /*is_dynamic*/,
std::integer_sequence<int, Indices...>) {
static_assert(sizeof...(Indices),
"Invalid number of parameter blocks. At least one parameter "
"block must be specified.");
return functor(input[Indices]..., output);
}
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, N8, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
input[8],
output);
}
};
// For dynamic sized cost functors
template <typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, std::true_type /*is_dynamic*/,
std::integer_sequence<int>) {
return functor(input, output);
}
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
output);
}
};
// For ceres cost functors (not ceres::CostFunction)
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, const void* /* NOT USED */) {
using ParameterBlockIndices =
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
using IsDynamic = std::integral_constant<bool, ParameterDims::kIsDynamic>;
return VariadicEvaluateImpl(functor, input, output, IsDynamic(),
ParameterBlockIndices());
}
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
output);
}
};
// For ceres::CostFunction
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output,
const CostFunction* /* NOT USED */) {
return functor.Evaluate(input, output, nullptr);
}
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2>
struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
output);
}
};
template<typename Functor, typename T, int N0, int N1>
struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
output);
}
};
template<typename Functor, typename T, int N0>
struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
output);
}
};
// Template instantiation for dynamically-sized functors.
template<typename Functor, typename T>
struct VariadicEvaluate<Functor, T, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input, output);
}
};
// Variadic evaluate is a helper function to evaluate ceres cost function or
// functors using an input, output and the parameter dimensions. There are
// several ways different possibilities:
// 1) If the passed functor is a 'ceres::CostFunction' its evaluate method is
// called.
// 2) If the functor is not a 'ceres::CostFunction' and the specified parameter
// dims is dynamic, the functor must have the following signature
// 'bool(T const* const* input, T* output)'.
// 3) If the functor is not a 'ceres::CostFunction' and the specified parameter
// dims is not dynamic, the input is expanded by using the number of parameter
// blocks. The signature of the functor must have the following signature
// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'.
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluate(const Functor& functor, T const* const* input,
T* output) {
return VariadicEvaluateImpl<ParameterDims>(functor, input, output, &functor);
}
} // namespace internal
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,42 +35,22 @@
#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/types.h"
namespace ceres {
// This struct describes the state of the optimizer after each
// iteration of the minimization.
struct CERES_EXPORT IterationSummary {
IterationSummary()
: iteration(0),
step_is_valid(false),
step_is_nonmonotonic(false),
step_is_successful(false),
cost(0.0),
cost_change(0.0),
gradient_max_norm(0.0),
gradient_norm(0.0),
step_norm(0.0),
eta(0.0),
step_size(0.0),
line_search_function_evaluations(0),
line_search_gradient_evaluations(0),
line_search_iterations(0),
linear_solver_iterations(0),
iteration_time_in_seconds(0.0),
step_solver_time_in_seconds(0.0),
cumulative_time_in_seconds(0.0) {}
// Current iteration number.
int32 iteration;
int iteration = 0;
// Step was numerically valid, i.e., all values are finite and the
// step reduces the value of the linearized model.
//
// Note: step_is_valid is always true when iteration = 0.
bool step_is_valid;
bool step_is_valid = false;
// Step did not reduce the value of the objective function
// sufficiently, but it was accepted because of the relaxed
@@ -78,7 +58,7 @@ struct CERES_EXPORT IterationSummary {
// algorithm.
//
// Note: step_is_nonmonotonic is always false when iteration = 0;
bool step_is_nonmonotonic;
bool step_is_nonmonotonic = false;
// Whether or not the minimizer accepted this step or not. If the
// ordinary trust region algorithm is used, this means that the
@@ -90,68 +70,68 @@ struct CERES_EXPORT IterationSummary {
// step and the step is declared successful.
//
// Note: step_is_successful is always true when iteration = 0.
bool step_is_successful;
bool step_is_successful = false;
// Value of the objective function.
double cost;
  double cost = 0.0;
// Change in the value of the objective function in this
// iteration. This can be positive or negative.
double cost_change;
double cost_change = 0.0;
// Infinity norm of the gradient vector.
double gradient_max_norm;
double gradient_max_norm = 0.0;
// 2-norm of the gradient vector.
double gradient_norm;
double gradient_norm = 0.0;
// 2-norm of the size of the step computed by the optimization
// algorithm.
double step_norm;
double step_norm = 0.0;
// For trust region algorithms, the ratio of the actual change in
// cost and the change in the cost of the linearized approximation.
double relative_decrease;
double relative_decrease = 0.0;
// Size of the trust region at the end of the current iteration. For
// the Levenberg-Marquardt algorithm, the regularization parameter
// mu = 1.0 / trust_region_radius.
double trust_region_radius;
double trust_region_radius = 0.0;
// For the inexact step Levenberg-Marquardt algorithm, this is the
// relative accuracy with which the Newton(LM) step is solved. This
// number affects only the iterative solvers capable of solving
// linear systems inexactly. Factorization-based exact solvers
// ignore it.
double eta;
double eta = 0.0;
// Step sized computed by the line search algorithm.
double step_size;
double step_size = 0.0;
// Number of function value evaluations used by the line search algorithm.
int line_search_function_evaluations;
int line_search_function_evaluations = 0;
// Number of function gradient evaluations used by the line search algorithm.
int line_search_gradient_evaluations;
int line_search_gradient_evaluations = 0;
// Number of iterations taken by the line search algorithm.
int line_search_iterations;
int line_search_iterations = 0;
// Number of iterations taken by the linear solver to solve for the
// Newton step.
int linear_solver_iterations;
int linear_solver_iterations = 0;
// All times reported below are wall times.
// Time (in seconds) spent inside the minimizer loop in the current
// iteration.
double iteration_time_in_seconds;
double iteration_time_in_seconds = 0.0;
// Time (in seconds) spent inside the trust region step solver.
double step_solver_time_in_seconds;
double step_solver_time_in_seconds = 0.0;
// Time (in seconds) since the user called Solve().
double cumulative_time_in_seconds;
double cumulative_time_in_seconds = 0.0;
};
// Interface for specifying callbacks that are executed at the end of

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,10 +32,12 @@
#ifndef CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
#define CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
#include <array>
#include <memory>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
namespace ceres {
@@ -61,7 +63,7 @@ namespace ceres {
// optimize over two dimensional vector delta in the tangent space at
// that point and then "move" to the point x + delta, where the move
// operation involves projecting back onto the sphere. Doing so
// removes a redundent dimension from the optimization, making it
// removes a redundant dimension from the optimization, making it
// numerically more robust and efficient.
//
// More generally we can define a function
@@ -154,17 +156,16 @@ class CERES_EXPORT IdentityParameterization : public LocalParameterization {
public:
explicit IdentityParameterization(int size);
virtual ~IdentityParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual bool MultiplyByJacobian(const double* x,
const int num_cols,
const double* global_matrix,
double* local_matrix) const;
virtual int GlobalSize() const { return size_; }
virtual int LocalSize() const { return size_; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
bool MultiplyByJacobian(const double* x,
const int num_cols,
const double* global_matrix,
double* local_matrix) const override;
int GlobalSize() const override { return size_; }
int LocalSize() const override { return size_; }
private:
const int size_;
@@ -176,19 +177,18 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
explicit SubsetParameterization(int size,
const std::vector<int>& constant_parameters);
virtual ~SubsetParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual bool MultiplyByJacobian(const double* x,
const int num_cols,
const double* global_matrix,
double* local_matrix) const;
virtual int GlobalSize() const {
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
bool MultiplyByJacobian(const double* x,
const int num_cols,
const double* global_matrix,
double* local_matrix) const override;
int GlobalSize() const override {
return static_cast<int>(constancy_mask_.size());
}
virtual int LocalSize() const { return local_size_; }
int LocalSize() const override { return local_size_; }
private:
const int local_size_;
@@ -202,13 +202,12 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
public:
virtual ~QuaternionParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return 4; }
virtual int LocalSize() const { return 3; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return 4; }
int LocalSize() const override { return 3; }
};
// Implements the quaternion local parameterization for Eigen's representation
@@ -222,16 +221,16 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
//
// Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x
// with * being the quaternion multiplication operator.
class EigenQuaternionParameterization : public ceres::LocalParameterization {
class CERES_EXPORT EigenQuaternionParameterization
: public ceres::LocalParameterization {
public:
virtual ~EigenQuaternionParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return 4; }
virtual int LocalSize() const { return 3; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return 4; }
int LocalSize() const override { return 3; }
};
// This provides a parameterization for homogeneous vectors which are commonly
@@ -247,32 +246,55 @@ class EigenQuaternionParameterization : public ceres::LocalParameterization {
// remain on the sphere. We assume that the last element of x is the scalar
// component. The size of the homogeneous vector is required to be greater than
// 1.
class CERES_EXPORT HomogeneousVectorParameterization :
public LocalParameterization {
class CERES_EXPORT HomogeneousVectorParameterization
: public LocalParameterization {
public:
explicit HomogeneousVectorParameterization(int size);
virtual ~HomogeneousVectorParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return size_; }
virtual int LocalSize() const { return size_ - 1; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return size_; }
int LocalSize() const override { return size_ - 1; }
private:
const int size_;
};
// This provides a parameterization for lines, where the line is
// over-parameterized by an origin point and a direction vector. So the
// parameter vector size needs to be two times the ambient space dimension,
// where the first half is interpreted as the origin point and the second half
// as the direction.
//
// The plus operator for the line direction is the same as for the
// HomogeneousVectorParameterization. The update of the origin point is
// perpendicular to the line direction before the update.
//
// This local parameterization is a special case of the affine Grassmannian
// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold))
// for the case Graff_1(R^n).
template <int AmbientSpaceDimension>
class LineParameterization : public LocalParameterization {
public:
static_assert(AmbientSpaceDimension >= 2,
"The ambient space must be at least 2");
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return 2 * AmbientSpaceDimension; }
int LocalSize() const override { return 2 * (AmbientSpaceDimension - 1); }
};
// Construct a local parameterization by taking the Cartesian product
// of a number of other local parameterizations. This is useful, when
// a parameter block is the cartesian product of two or more
// manifolds. For example the parameters of a camera consist of a
// rotation and a translation, i.e., SO(3) x R^3.
//
// Currently this class supports taking the cartesian product of up to
// four local parameterizations.
//
// Example usage:
//
// ProductParameterization product_param(new QuaterionionParameterization(),
@@ -282,35 +304,49 @@ class CERES_EXPORT HomogeneousVectorParameterization :
// rotation is represented using a quaternion.
class CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
ProductParameterization(const ProductParameterization&) = delete;
ProductParameterization& operator=(const ProductParameterization&) = delete;
//
// NOTE: All the constructors take ownership of the input local
// NOTE: The constructor takes ownership of the input local
// parameterizations.
//
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2);
template <typename... LocalParams>
ProductParameterization(LocalParams*... local_params)
: local_params_(sizeof...(LocalParams)),
local_size_{0},
global_size_{0},
buffer_size_{0} {
constexpr int kNumLocalParams = sizeof...(LocalParams);
static_assert(kNumLocalParams >= 2,
"At least two local parameterizations must be specified.");
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2,
LocalParameterization* local_param3);
using LocalParameterizationPtr = std::unique_ptr<LocalParameterization>;
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2,
LocalParameterization* local_param3,
LocalParameterization* local_param4);
// Wrap all raw pointers into std::unique_ptr for exception safety.
std::array<LocalParameterizationPtr, kNumLocalParams> local_params_array{
LocalParameterizationPtr(local_params)...};
virtual ~ProductParameterization();
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return global_size_; }
virtual int LocalSize() const { return local_size_; }
// Initialize internal state.
for (int i = 0; i < kNumLocalParams; ++i) {
LocalParameterizationPtr& param = local_params_[i];
param = std::move(local_params_array[i]);
buffer_size_ =
std::max(buffer_size_, param->LocalSize() * param->GlobalSize());
global_size_ += param->GlobalSize();
local_size_ += param->LocalSize();
}
}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return global_size_; }
int LocalSize() const override { return local_size_; }
private:
void Init();
std::vector<LocalParameterization*> local_params_;
std::vector<std::unique_ptr<LocalParameterization>> local_params_;
int local_size_;
int global_size_;
int buffer_size_;
@@ -319,5 +355,7 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#include "ceres/internal/line_parameterization.h"
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
// anything special (i.e. if we used a basic quadratic loss), the
// residual for the erroneous measurement will result in extreme error
// due to the quadratic nature of squared loss. This results in the
// entire solution getting pulled away from the optimimum to reduce
// entire solution getting pulled away from the optimum to reduce
// the large error that would otherwise be attributed to the wrong
// measurement.
//
@@ -75,11 +75,11 @@
#ifndef CERES_PUBLIC_LOSS_FUNCTION_H_
#define CERES_PUBLIC_LOSS_FUNCTION_H_
#include "glog/logging.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include <memory>
#include "ceres/internal/disable_warnings.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
@@ -119,7 +119,6 @@ class CERES_EXPORT LossFunction {
// Note: in the region of interest (i.e. s < 3) we have:
// TrivialLoss >= HuberLoss >= SoftLOneLoss >= CauchyLoss
// This corresponds to no robustification.
//
// rho(s) = s
@@ -131,7 +130,7 @@ class CERES_EXPORT LossFunction {
// thing.
class CERES_EXPORT TrivialLoss : public LossFunction {
public:
virtual void Evaluate(double, double*) const;
void Evaluate(double, double*) const override;
};
// Scaling
@@ -174,8 +173,8 @@ class CERES_EXPORT TrivialLoss : public LossFunction {
// http://en.wikipedia.org/wiki/Huber_Loss_Function
class CERES_EXPORT HuberLoss : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) { }
virtual void Evaluate(double, double*) const;
explicit HuberLoss(double a) : a_(a), b_(a * a) {}
void Evaluate(double, double*) const override;
private:
const double a_;
@@ -187,11 +186,11 @@ class CERES_EXPORT HuberLoss : public LossFunction {
//
// rho(s) = 2 (sqrt(1 + s) - 1).
//
// At s = 0: rho = [0, 1, -1/2].
// At s = 0: rho = [0, 1, -1 / (2 * a^2)].
class CERES_EXPORT SoftLOneLoss : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
private:
// b = a^2.
@@ -204,11 +203,11 @@ class CERES_EXPORT SoftLOneLoss : public LossFunction {
//
// rho(s) = log(1 + s).
//
// At s = 0: rho = [0, 1, -1].
// At s = 0: rho = [0, 1, -1 / a^2].
class CERES_EXPORT CauchyLoss : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
private:
// b = a^2.
@@ -228,8 +227,8 @@ class CERES_EXPORT CauchyLoss : public LossFunction {
// At s = 0: rho = [0, 1, 0].
class CERES_EXPORT ArctanLoss : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
virtual void Evaluate(double, double*) const;
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {}
void Evaluate(double, double*) const override;
private:
const double a_;
@@ -268,7 +267,7 @@ class CERES_EXPORT ArctanLoss : public LossFunction {
class CERES_EXPORT TolerantLoss : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
virtual void Evaluate(double, double*) const;
void Evaluate(double, double*) const override;
private:
const double a_, b_, c_;
@@ -277,16 +276,17 @@ class CERES_EXPORT TolerantLoss : public LossFunction {
// This is the Tukey biweight loss function which aggressively
// attempts to suppress large errors.
//
// The term is computed as:
// The term is computed as follows where the equations are scaled by a
// factor of 2 because the cost function is given by 1/2 rho(s):
//
// rho(s) = a^2 / 6 * (1 - (1 - s / a^2)^3 ) for s <= a^2,
// rho(s) = a^2 / 6 for s > a^2.
// rho(s) = a^2 / 3 * (1 - (1 - s / a^2)^3 ) for s <= a^2,
// rho(s) = a^2 / 3 for s > a^2.
//
// At s = 0: rho = [0, 0.5, -1 / a^2]
// At s = 0: rho = [0, 1, -2 / a^2]
class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
public:
explicit TukeyLoss(double a) : a_squared_(a * a) { }
virtual void Evaluate(double, double*) const;
explicit TukeyLoss(double a) : a_squared_(a * a) {}
void Evaluate(double, double*) const override;
private:
const double a_squared_;
@@ -297,13 +297,15 @@ class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
// The loss functions must not be NULL.
class CERES_EXPORT ComposedLoss : public LossFunction {
public:
explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
const LossFunction* g, Ownership ownership_g);
explicit ComposedLoss(const LossFunction* f,
Ownership ownership_f,
const LossFunction* g,
Ownership ownership_g);
virtual ~ComposedLoss();
virtual void Evaluate(double, double*) const;
void Evaluate(double, double*) const override;
private:
internal::scoped_ptr<const LossFunction> f_, g_;
std::unique_ptr<const LossFunction> f_, g_;
const Ownership ownership_f_, ownership_g_;
};
@@ -329,21 +331,22 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
// ownership parameter.
ScaledLoss(const LossFunction* rho, double a, Ownership ownership) :
rho_(rho), a_(a), ownership_(ownership) { }
ScaledLoss(const LossFunction* rho, double a, Ownership ownership)
: rho_(rho), a_(a), ownership_(ownership) {}
ScaledLoss(const ScaledLoss&) = delete;
void operator=(const ScaledLoss&) = delete;
virtual ~ScaledLoss() {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
}
virtual void Evaluate(double, double*) const;
void Evaluate(double, double*) const override;
private:
internal::scoped_ptr<const LossFunction> rho_;
std::unique_ptr<const LossFunction> rho_;
const double a_;
const Ownership ownership_;
CERES_DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
};
// Sometimes after the optimization problem has been constructed, we
@@ -387,8 +390,10 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
class CERES_EXPORT LossFunctionWrapper : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
: rho_(rho), ownership_(ownership) {
}
: rho_(rho), ownership_(ownership) {}
LossFunctionWrapper(const LossFunctionWrapper&) = delete;
void operator=(const LossFunctionWrapper&) = delete;
virtual ~LossFunctionWrapper() {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
@@ -396,13 +401,12 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
}
}
virtual void Evaluate(double sq_norm, double out[3]) const {
void Evaluate(double sq_norm, double out[3]) const override {
if (rho_.get() == NULL) {
out[0] = sq_norm;
out[1] = 1.0;
out[2] = 0.0;
}
else {
} else {
rho_->Evaluate(sq_norm, out);
}
}
@@ -416,9 +420,8 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
}
private:
internal::scoped_ptr<const LossFunction> rho_;
std::unique_ptr<const LossFunction> rho_;
Ownership ownership_;
CERES_DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,8 +35,8 @@
#define CERES_PUBLIC_NORMAL_PRIOR_H_
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/eigen.h"
namespace ceres {
@@ -57,15 +57,15 @@ namespace ceres {
// which would be the case if the covariance matrix S is rank
// deficient.
class CERES_EXPORT NormalPrior: public CostFunction {
class CERES_EXPORT NormalPrior : public CostFunction {
public:
// Check that the number of rows in the vector b are the same as the
// number of columns in the matrix A, crash otherwise.
NormalPrior(const Matrix& A, const Vector& b);
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override;
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const;
private:
Matrix A_;
Vector b_;

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -52,16 +52,16 @@
// The actual cost added to the total problem is e^2, or (k - x'k)^2; however,
// the squaring is implicitly done by the optimization framework.
//
// To write an numerically-differentiable cost function for the above model, first
// define the object
// To write an numerically-differentiable cost function for the above model,
// first define the object
//
// class MyScalarCostFunctor {
// MyScalarCostFunctor(double k): k_(k) {}
// explicit MyScalarCostFunctor(double k): k_(k) {}
//
// bool operator()(const double* const x,
// const double* const y,
// double* residuals) const {
// residuals[0] = k_ - x[0] * y[0] + x[1] * y[1];
// residuals[0] = k_ - x[0] * y[0] - x[1] * y[1];
// return true;
// }
//
@@ -98,6 +98,8 @@
// NumericDiffCostFunction also supports cost functions with a
// runtime-determined number of residuals. For example:
//
// clang-format off
//
// CostFunction* cost_function
// = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^
@@ -109,10 +111,8 @@
// Indicate dynamic number of residuals --------------------+ | |
// Dimension of x ------------------------------------------------+ |
// Dimension of y ---------------------------------------------------+
// clang-format on
//
// The framework can currently accommodate cost functions of up to 10
// independent variables, and there is no limit on the dimensionality
// of each of them.
//
// The central difference method is considerably more accurate at the cost of
// twice as many function evaluations than forward difference. Consider using
@@ -161,10 +161,13 @@
#ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
#include <array>
#include <memory>
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
@@ -175,34 +178,17 @@ namespace ceres {
template <typename CostFunctor,
NumericDiffMethodType method = CENTRAL,
int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC
int N0 = 0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
int N2 = 0, // Number of parameters in block 2.
int N3 = 0, // Number of parameters in block 3.
int N4 = 0, // Number of parameters in block 4.
int N5 = 0, // Number of parameters in block 5.
int N6 = 0, // Number of parameters in block 6.
int N7 = 0, // Number of parameters in block 7.
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
class NumericDiffCostFunction
: public SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
int... Ns> // Parameters dimensions for each block.
class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
public:
NumericDiffCostFunction(
CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
int num_residuals = kNumResiduals,
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor),
ownership_(ownership),
options_(options) {
: functor_(functor), ownership_(ownership), options_(options) {
if (kNumResiduals == DYNAMIC) {
SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>
::set_num_residuals(num_residuals);
SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
}
@@ -212,24 +198,21 @@ class NumericDiffCostFunction
}
}
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const override {
using internal::FixedArray;
using internal::NumericDiff;
const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
const int kNumParameterBlocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
(N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
using ParameterDims =
typename SizedCostFunction<kNumResiduals, Ns...>::ParameterDims;
constexpr int kNumParameters = ParameterDims::kNumParameters;
constexpr int kNumParameterBlocks = ParameterDims::kNumParameterBlocks;
// Get the function value (residuals) at the the point to evaluate.
if (!internal::EvaluateImpl<CostFunctor,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor_.get(),
parameters,
residuals,
functor_.get())) {
if (!internal::VariadicEvaluate<ParameterDims>(
*functor_, parameters, residuals)) {
return false;
}
@@ -239,77 +222,29 @@ class NumericDiffCostFunction
// Create a copy of the parameters which will get mutated.
FixedArray<double> parameters_copy(kNumParameters);
FixedArray<double*> parameters_reference_copy(kNumParameterBlocks);
std::array<double*, kNumParameterBlocks> parameters_reference_copy =
ParameterDims::GetUnpackedParameters(parameters_copy.data());
parameters_reference_copy[0] = parameters_copy.get();
if (N1) parameters_reference_copy[1] = parameters_reference_copy[0] + N0;
if (N2) parameters_reference_copy[2] = parameters_reference_copy[1] + N1;
if (N3) parameters_reference_copy[3] = parameters_reference_copy[2] + N2;
if (N4) parameters_reference_copy[4] = parameters_reference_copy[3] + N3;
if (N5) parameters_reference_copy[5] = parameters_reference_copy[4] + N4;
if (N6) parameters_reference_copy[6] = parameters_reference_copy[5] + N5;
if (N7) parameters_reference_copy[7] = parameters_reference_copy[6] + N6;
if (N8) parameters_reference_copy[8] = parameters_reference_copy[7] + N7;
if (N9) parameters_reference_copy[9] = parameters_reference_copy[8] + N8;
#define CERES_COPY_PARAMETER_BLOCK(block) \
if (N ## block) memcpy(parameters_reference_copy[block], \
parameters[block], \
sizeof(double) * N ## block); // NOLINT
CERES_COPY_PARAMETER_BLOCK(0);
CERES_COPY_PARAMETER_BLOCK(1);
CERES_COPY_PARAMETER_BLOCK(2);
CERES_COPY_PARAMETER_BLOCK(3);
CERES_COPY_PARAMETER_BLOCK(4);
CERES_COPY_PARAMETER_BLOCK(5);
CERES_COPY_PARAMETER_BLOCK(6);
CERES_COPY_PARAMETER_BLOCK(7);
CERES_COPY_PARAMETER_BLOCK(8);
CERES_COPY_PARAMETER_BLOCK(9);
#undef CERES_COPY_PARAMETER_BLOCK
#define CERES_EVALUATE_JACOBIAN_FOR_BLOCK(block) \
if (N ## block && jacobians[block] != NULL) { \
if (!NumericDiff<CostFunctor, \
method, \
kNumResiduals, \
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9, \
block, \
N ## block >::EvaluateJacobianForParameterBlock( \
functor_.get(), \
residuals, \
options_, \
SizedCostFunction<kNumResiduals, \
N0, N1, N2, N3, N4, \
N5, N6, N7, N8, N9>::num_residuals(), \
block, \
N ## block, \
parameters_reference_copy.get(), \
jacobians[block])) { \
return false; \
} \
for (int block = 0; block < kNumParameterBlocks; ++block) {
memcpy(parameters_reference_copy[block],
parameters[block],
sizeof(double) * ParameterDims::GetDim(block));
}
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(0);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(1);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(2);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(3);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(4);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(5);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(6);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(7);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(8);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(9);
#undef CERES_EVALUATE_JACOBIAN_FOR_BLOCK
internal::EvaluateJacobianForParameterBlocks<ParameterDims>::
template Apply<method, kNumResiduals>(
functor_.get(),
residuals,
options_,
SizedCostFunction<kNumResiduals, Ns...>::num_residuals(),
parameters_reference_copy.data(),
jacobians);
return true;
}
private:
internal::scoped_ptr<CostFunctor> functor_;
std::unique_ptr<CostFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,23 +32,17 @@
#ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#include "ceres/internal/port.h"
namespace ceres {
// Options pertaining to numeric differentiation (e.g., convergence criteria,
// step sizes).
struct CERES_EXPORT NumericDiffOptions {
NumericDiffOptions() {
relative_step_size = 1e-6;
ridders_relative_initial_step_size = 1e-2;
max_num_ridders_extrapolations = 10;
ridders_epsilon = 1e-12;
ridders_step_shrink_factor = 2.0;
}
// Numeric differentiation step size (multiplied by parameter block's
// order of magnitude). If parameters are close to zero, the step size
// is set to sqrt(machine_epsilon).
double relative_step_size;
double relative_step_size = 1e-6;
// Initial step size for Ridders adaptive numeric differentiation (multiplied
// by parameter block's order of magnitude).
@@ -59,19 +53,19 @@ struct CERES_EXPORT NumericDiffOptions {
// Note: For Ridders' method to converge, the step size should be initialized
// to a value that is large enough to produce a significant change in the
// function. As the derivative is estimated, the step size decreases.
double ridders_relative_initial_step_size;
double ridders_relative_initial_step_size = 1e-2;
// Maximal number of adaptive extrapolations (sampling) in Ridders' method.
int max_num_ridders_extrapolations;
int max_num_ridders_extrapolations = 10;
// Convergence criterion on extrapolation error for Ridders adaptive
// differentiation. The available error estimation methods are defined in
// NumericDiffErrorType and set in the "ridders_error_method" field.
double ridders_epsilon;
double ridders_epsilon = 1e-12;
// The factor in which to shrink the step size with each extrapolation in
// Ridders' method.
double ridders_step_shrink_factor;
double ridders_step_shrink_factor = 2.0;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,9 @@
#include <map>
#include <set>
#include <unordered_map>
#include <vector>
#include "ceres/internal/port.h"
#include "glog/logging.h"
@@ -63,8 +65,7 @@ class OrderedGroups {
return false;
}
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
auto it = element_to_group_.find(element);
if (it != element_to_group_.end()) {
if (it->second == group) {
// Element is already in the right group, nothing to do.
@@ -126,17 +127,14 @@ class OrderedGroups {
return;
}
typename std::map<int, std::set<T> >::reverse_iterator it =
group_to_elements_.rbegin();
std::map<int, std::set<T> > new_group_to_elements;
auto it = group_to_elements_.rbegin();
std::map<int, std::set<T>> new_group_to_elements;
new_group_to_elements[it->first] = it->second;
int new_group_id = it->first + 1;
for (++it; it != group_to_elements_.rend(); ++it) {
for (typename std::set<T>::const_iterator element_it = it->second.begin();
element_it != it->second.end();
++element_it) {
element_to_group_[*element_it] = new_group_id;
for (const auto& element : it->second) {
element_to_group_[element] = new_group_id;
}
new_group_to_elements[new_group_id] = it->second;
new_group_id++;
@@ -148,8 +146,7 @@ class OrderedGroups {
// Return the group id for the element. If the element is not a
// member of any group, return -1.
int GroupId(const T element) const {
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
auto it = element_to_group_.find(element);
if (it == element_to_group_.end()) {
return -1;
}
@@ -157,27 +154,21 @@ class OrderedGroups {
}
bool IsMember(const T element) const {
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
auto it = element_to_group_.find(element);
return (it != element_to_group_.end());
}
// This function always succeeds, i.e., implicitly there exists a
// group for every integer.
int GroupSize(const int group) const {
typename std::map<int, std::set<T> >::const_iterator it =
group_to_elements_.find(group);
return (it == group_to_elements_.end()) ? 0 : it->second.size();
auto it = group_to_elements_.find(group);
return (it == group_to_elements_.end()) ? 0 : it->second.size();
}
int NumElements() const {
return element_to_group_.size();
}
int NumElements() const { return element_to_group_.size(); }
// Number of groups with one or more elements.
int NumGroups() const {
return group_to_elements_.size();
}
int NumGroups() const { return group_to_elements_.size(); }
// The first group with one or more elements. Calling this when
// there are no groups with non-zero elements will result in a
@@ -187,17 +178,15 @@ class OrderedGroups {
return group_to_elements_.begin()->first;
}
const std::map<int, std::set<T> >& group_to_elements() const {
const std::map<int, std::set<T>>& group_to_elements() const {
return group_to_elements_;
}
const std::map<T, int>& element_to_group() const {
return element_to_group_;
}
const std::map<T, int>& element_to_group() const { return element_to_group_; }
private:
std::map<int, std::set<T> > group_to_elements_;
std::map<T, int> element_to_group_;
std::map<int, std::set<T>> group_to_elements_;
std::unordered_map<T, int> element_to_group_;
};
// Typedef for the most commonly used version of OrderedGroups.

View File

@@ -34,22 +34,23 @@
#ifndef CERES_PUBLIC_PROBLEM_H_
#define CERES_PUBLIC_PROBLEM_H_
#include <array>
#include <cstddef>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "glog/logging.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "ceres/context.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
class CostFunction;
class EvaluationCallback;
class LossFunction;
class LocalParameterization;
class Solver;
@@ -114,20 +115,13 @@ typedef internal::ResidualBlock* ResidualBlockId;
//
// Problem problem;
//
// problem.AddResidualBlock(new MyUnaryCostFunction(...), x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), x2, x3);
// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3);
//
// Please see cost_function.h for details of the CostFunction object.
class CERES_EXPORT Problem {
public:
struct CERES_EXPORT Options {
Options()
: cost_function_ownership(TAKE_OWNERSHIP),
loss_function_ownership(TAKE_OWNERSHIP),
local_parameterization_ownership(TAKE_OWNERSHIP),
enable_fast_removal(false),
disable_all_safety_checks(false) {}
// These flags control whether the Problem object owns the cost
// functions, loss functions, and parameterizations passed into
// the Problem. If set to TAKE_OWNERSHIP, then the problem object
@@ -135,25 +129,25 @@ class CERES_EXPORT Problem {
// destruction. The destructor is careful to delete the pointers
// only once, since sharing cost/loss/parameterizations is
// allowed.
Ownership cost_function_ownership;
Ownership loss_function_ownership;
Ownership local_parameterization_ownership;
Ownership cost_function_ownership = TAKE_OWNERSHIP;
Ownership loss_function_ownership = TAKE_OWNERSHIP;
Ownership local_parameterization_ownership = TAKE_OWNERSHIP;
// If true, trades memory for faster RemoveResidualBlock() and
// RemoveParameterBlock() operations.
//
// By default, RemoveParameterBlock() and RemoveResidualBlock() take time
// proportional to the size of the entire problem. If you only ever remove
// parameters or residuals from the problem occassionally, this might be
// parameters or residuals from the problem occasionally, this might be
// acceptable. However, if you have memory to spare, enable this option to
// make RemoveParameterBlock() take time proportional to the number of
// residual blocks that depend on it, and RemoveResidualBlock() take (on
// average) constant time.
//
// The increase in memory usage is twofold: an additonal hash set per
// The increase in memory usage is twofold: an additional hash set per
// parameter block containing all the residuals that depend on the parameter
// block; and a hash set in the problem containing all residuals.
bool enable_fast_removal;
bool enable_fast_removal = false;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
@@ -164,22 +158,51 @@ class CERES_EXPORT Problem {
//
// WARNING: Do not set this to true, unless you are absolutely sure of what
// you are doing.
bool disable_all_safety_checks;
bool disable_all_safety_checks = false;
// A Ceres global context to use for solving this problem. This may help to
// reduce computation time as Ceres can reuse expensive objects to create.
// The context object can be nullptr, in which case Ceres may create one.
//
// Ceres does NOT take ownership of the pointer.
Context* context = nullptr;
// Using this callback interface, Ceres can notify you when it is
// about to evaluate the residuals or jacobians. With the
// callback, you can share computation between residual blocks by
// doing the shared computation in
// EvaluationCallback::PrepareForEvaluation() before Ceres calls
// CostFunction::Evaluate(). It also enables caching results
// between a pure residual evaluation and a residual & jacobian
// evaluation.
//
// Problem DOES NOT take ownership of the callback.
//
// NOTE: Evaluation callbacks are incompatible with inner
// iterations. So calling Solve with
// Solver::Options::use_inner_iterations = true on a Problem with
// a non-null evaluation callback is an error.
EvaluationCallback* evaluation_callback = nullptr;
};
// The default constructor is equivalent to the
// invocation Problem(Problem::Options()).
Problem();
explicit Problem(const Options& options);
Problem(Problem&&);
Problem& operator=(Problem&&);
Problem(const Problem&) = delete;
Problem& operator=(const Problem&) = delete;
~Problem();
// Add a residual block to the overall cost function. The cost
// function carries with it information about the sizes of the
// function carries with its information about the sizes of the
// parameter blocks it expects. The function checks that these match
// the sizes of the parameter blocks listed in parameter_blocks. The
// program aborts if a mismatch is detected. loss_function can be
// NULL, in which case the cost of the term is just the squared norm
// nullptr, in which case the cost of the term is just the squared norm
// of the residuals.
//
// The user has the option of explicitly adding the parameter blocks
@@ -208,59 +231,35 @@ class CERES_EXPORT Problem {
//
// Problem problem;
//
// problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x1);
// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1);
//
// Add a residual block by listing the parameter block pointers
// directly instead of wapping them in a container.
template <typename... Ts>
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0,
Ts*... xs) {
const std::array<double*, sizeof...(Ts) + 1> parameter_blocks{{x0, xs...}};
return AddResidualBlock(cost_function,
loss_function,
parameter_blocks.data(),
static_cast<int>(parameter_blocks.size()));
}
// Add a residual block by providing a vector of parameter blocks.
ResidualBlockId AddResidualBlock(
CostFunction* cost_function,
LossFunction* loss_function,
const std::vector<double*>& parameter_blocks);
// Convenience methods for adding residuals with a small number of
// parameters. This is the common case. Instead of specifying the
// parameter block arguments as a vector, list them as pointers.
// Add a residual block by providing a pointer to the parameter block array
// and the number of parameter blocks.
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7, double* x8);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7, double* x8,
double* x9);
double* const* const parameter_blocks,
int num_parameter_blocks);
// Add a parameter block with appropriate size to the problem.
// Repeated calls with the same arguments are ignored. Repeated
@@ -290,7 +289,7 @@ class CERES_EXPORT Problem {
// ordering, rendering the jacobian or residuals returned from the solver
// uninterpretable. If you depend on the evaluated jacobian, do not use
// remove! This may change in a future release.
void RemoveParameterBlock(double* values);
void RemoveParameterBlock(const double* values);
// Remove a residual block from the problem. Any parameters that the residual
// block depends on are not removed. The cost and loss functions for the
@@ -304,32 +303,43 @@ class CERES_EXPORT Problem {
void RemoveResidualBlock(ResidualBlockId residual_block);
// Hold the indicated parameter block constant during optimization.
void SetParameterBlockConstant(double* values);
void SetParameterBlockConstant(const double* values);
// Allow the indicated parameter block to vary during optimization.
void SetParameterBlockVariable(double* values);
// Returns true if a parameter block is set constant, and false otherwise.
bool IsParameterBlockConstant(double* values) const;
// Returns true if a parameter block is set constant, and false
// otherwise. A parameter block may be set constant in two ways:
// either by calling SetParameterBlockConstant or by associating a
// LocalParameterization with a zero dimensional tangent space with
// it.
bool IsParameterBlockConstant(const double* values) const;
// Set the local parameterization for one of the parameter blocks.
// The local_parameterization is owned by the Problem by default. It
// is acceptable to set the same parameterization for multiple
// parameters; the destructor is careful to delete local
// parameterizations only once. The local parameterization can only
// be set once per parameter, and cannot be changed once set.
// parameterizations only once. Calling SetParameterization with
// nullptr will clear any previously set parameterization.
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
// Get the local parameterization object associated with this
// parameter block. If there is no parameterization object
// associated then NULL is returned.
const LocalParameterization* GetParameterization(double* values) const;
// associated then nullptr is returned.
const LocalParameterization* GetParameterization(const double* values) const;
// Set the lower/upper bound for the parameter with position "index".
// Set the lower/upper bound for the parameter at position "index".
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
// Get the lower/upper bound for the parameter at position
// "index". If the parameter is not bounded by the user, then its
// lower bound is -std::numeric_limits<double>::max() and upper
// bound is std::numeric_limits<double>::max().
double GetParameterLowerBound(const double* values, int index) const;
double GetParameterUpperBound(const double* values, int index) const;
// Number of parameter blocks in the problem. Always equals
// parameter_blocks().size() and parameter_block_sizes().size().
int NumParameterBlocks() const;
@@ -376,7 +386,7 @@ class CERES_EXPORT Problem {
const CostFunction* GetCostFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
// Get the LossFunction for the given residual block. Returns NULL
// Get the LossFunction for the given residual block. Returns nullptr
// if no loss function is associated with this residual block.
const LossFunction* GetLossFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
@@ -393,11 +403,6 @@ class CERES_EXPORT Problem {
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
EvaluateOptions()
: apply_loss_function(true),
num_threads(1) {
}
// The set of parameter blocks for which evaluation should be
// performed. This vector determines the order that parameter
// blocks occur in the gradient vector and in the columns of the
@@ -430,12 +435,12 @@ class CERES_EXPORT Problem {
// function. This is of use for example if the user wishes to
// analyse the solution quality by studying the distribution of
// residuals before and after the solve.
bool apply_loss_function;
bool apply_loss_function = true;
int num_threads;
int num_threads = 1;
};
// Evaluate Problem. Any of the output pointers can be NULL. Which
// Evaluate Problem. Any of the output pointers can be nullptr. Which
// residual blocks and parameter blocks are used is controlled by
// the EvaluateOptions struct above.
//
@@ -445,16 +450,16 @@ class CERES_EXPORT Problem {
//
// Problem problem;
// double x = 1;
// problem.AddResidualBlock(new MyCostFunction, NULL, &x);
// problem.AddResidualBlock(new MyCostFunction, nullptr, &x);
//
// double cost = 0.0;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
// problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
//
// x = 2;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
// x = 2;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
//
// is the way to do so.
//
@@ -468,17 +473,63 @@ class CERES_EXPORT Problem {
// Note 3: This function cannot be called while the problem is being
// solved, for example it cannot be called from an IterationCallback
// at the end of an iteration during a solve.
//
// Note 4: If an EvaluationCallback is associated with the problem,
// then its PrepareForEvaluation method will be called everytime
// this method is called with new_point = true.
bool Evaluate(const EvaluateOptions& options,
double* cost,
std::vector<double>* residuals,
std::vector<double>* gradient,
CRSMatrix* jacobian);
// Evaluates the residual block, storing the scalar cost in *cost,
// the residual components in *residuals, and the jacobians between
// the parameters and residuals in jacobians[i], in row-major order.
//
// If residuals is nullptr, the residuals are not computed.
//
// If jacobians is nullptr, no Jacobians are computed. If
// jacobians[i] is nullptr, then the Jacobian for that parameter
// block is not computed.
//
// It is not okay to request the Jacobian w.r.t a parameter block
// that is constant.
//
// The return value indicates the success or failure. Even if the
// function returns false, the caller should expect the output
// memory locations to have been modified.
//
// The returned cost and jacobians have had robustification and
// local parameterizations applied already; for example, the
// jacobian for a 4-dimensional quaternion parameter using the
// "QuaternionParameterization" is num_residuals by 3 instead of
// num_residuals by 4.
//
// apply_loss_function as the name implies allows the user to switch
// the application of the loss function on and off.
//
// WARNING: If an EvaluationCallback is associated with the problem
// then it is the user's responsibility to call it before calling
// this method.
//
// This is because, if the user calls this method multiple times, we
// cannot tell if the underlying parameter blocks have changed
// between calls or not. So if EvaluateResidualBlock was responsible
// for calling the EvaluationCallback, it will have to do it
// everytime it is called. Which makes the common case where the
// parameter blocks do not change, inefficient. So we leave it to
// the user to call the EvaluationCallback as needed.
bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
bool apply_loss_function,
double* cost,
double* residuals,
double** jacobians) const;
private:
friend class Solver;
friend class Covariance;
internal::scoped_ptr<internal::ProblemImpl> problem_impl_;
CERES_DISALLOW_COPY_AND_ASSIGN(Problem);
std::unique_ptr<internal::ProblemImpl> impl_;
};
} // namespace ceres

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,8 @@
#include <cmath>
#include <limits>
#include "glog/logging.h"
namespace ceres {
// Trivial wrapper to index linear arrays as matrices, given a fixed
@@ -82,47 +84,44 @@ MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer);
// and quaternion is a 4-tuple that will contain the resulting quaternion.
// The implementation may be used with auto-differentiation up to the first
// derivative, higher derivatives may have unexpected results near the origin.
template<typename T>
template <typename T>
void AngleAxisToQuaternion(const T* angle_axis, T* quaternion);
// Convert a quaternion to the equivalent combined axis-angle representation.
// The value quaternion must be a unit quaternion - it is not normalized first,
// and angle_axis will be filled with a value whose norm is the angle of
// rotation in radians, and whose direction is the axis of rotation.
// The implemention may be used with auto-differentiation up to the first
// The implementation may be used with auto-differentiation up to the first
// derivative, higher derivatives may have unexpected results near the origin.
template<typename T>
template <typename T>
void QuaternionToAngleAxis(const T* quaternion, T* angle_axis);
// Conversions between 3x3 rotation matrix (in column major order) and
// quaternion rotation representations. Templated for use with
// quaternion rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* quaternion);
const MatrixAdapter<const T, row_stride, col_stride>& R, T* quaternion);
// Conversions between 3x3 rotation matrix (in column major order) and
// axis-angle rotation representations. Templated for use with
// axis-angle rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToAngleAxis(const T* R, T* angle_axis);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* angle_axis);
const MatrixAdapter<const T, row_stride, col_stride>& R, T* angle_axis);
template <typename T>
void AngleAxisToRotationMatrix(const T* angle_axis, T* R);
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
const T* angle_axis,
const MatrixAdapter<T, row_stride, col_stride>& R);
const T* angle_axis, const MatrixAdapter<T, row_stride, col_stride>& R);
// Conversions between 3x3 rotation matrix (in row major order) and
// Euler angle (in degrees) rotation representations.
@@ -135,8 +134,7 @@ void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R);
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
const T* euler,
const MatrixAdapter<T, row_stride, col_stride>& R);
const T* euler, const MatrixAdapter<T, row_stride, col_stride>& R);
// Convert a 4-vector to a 3x3 scaled rotation matrix.
//
@@ -157,25 +155,23 @@ void EulerAnglesToRotationMatrix(
// such that det(Q) = 1 and Q*Q' = I
//
// WARNING: The rotation matrix is ROW MAJOR
template <typename T> inline
void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
template <typename T>
inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
template <typename T, int row_stride, int col_stride> inline
void QuaternionToScaledRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R);
template <typename T, int row_stride, int col_stride>
inline void QuaternionToScaledRotation(
const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R);
// Same as above except that the rotation matrix is normalized by the
// Frobenius norm, so that R * R' = I (and det(R) = 1).
//
// WARNING: The rotation matrix is ROW MAJOR
template <typename T> inline
void QuaternionToRotation(const T q[4], T R[3 * 3]);
template <typename T>
inline void QuaternionToRotation(const T q[4], T R[3 * 3]);
template <typename T, int row_stride, int col_stride> inline
void QuaternionToRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R);
template <typename T, int row_stride, int col_stride>
inline void QuaternionToRotation(
const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R);
// Rotates a point pt by a quaternion q:
//
@@ -185,37 +181,54 @@ void QuaternionToRotation(
// write the transform as (something)*pt + pt, as is clear from the
// formula below. If you pass in a quaternion with |q|^2 = 2 then you
// WILL NOT get back 2 times the result you get for a unit quaternion.
template <typename T> inline
void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
//
// Inplace rotation is not supported. pt and result must point to different
// memory locations, otherwise the result will be undefined.
template <typename T>
inline void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
// With this function you do not need to assume that q has unit norm.
// It does assume that the norm is non-zero.
template <typename T> inline
void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
//
// Inplace rotation is not supported. pt and result must point to different
// memory locations, otherwise the result will be undefined.
template <typename T>
inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
// zw = z * w, where * is the Quaternion product between 4 vectors.
template<typename T> inline
void QuaternionProduct(const T z[4], const T w[4], T zw[4]);
//
// Inplace quaternion product is not supported. The resulting quaternion zw must
// not share the memory with the input quaternion z and w, otherwise the result
// will be undefined.
template <typename T>
inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]);
// xy = x cross y;
template<typename T> inline
void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]);
//
// Inplace cross product is not supported. The resulting vector x_cross_y must
// not share the memory with the input vectors x and y, otherwise the result
// will be undefined.
template <typename T>
inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]);
template<typename T> inline
T DotProduct(const T x[3], const T y[3]);
template <typename T>
inline T DotProduct(const T x[3], const T y[3]);
// y = R(angle_axis) * x;
template<typename T> inline
void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
//
// Inplace rotation is not supported. pt and result must point to different
// memory locations, otherwise the result will be undefined.
template <typename T>
inline void AngleAxisRotatePoint(const T angle_axis[3],
const T pt[3],
T result[3]);
// --- IMPLEMENTATION
template<typename T, int row_stride, int col_stride>
template <typename T, int row_stride, int col_stride>
struct MatrixAdapter {
T* pointer_;
explicit MatrixAdapter(T* pointer)
: pointer_(pointer)
{}
explicit MatrixAdapter(T* pointer) : pointer_(pointer) {}
T& operator()(int r, int c) const {
return pointer_[r * row_stride + c * col_stride];
@@ -232,7 +245,7 @@ MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer) {
return MatrixAdapter<T, 3, 1>(pointer);
}
template<typename T>
template <typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
const T& a0 = angle_axis[0];
const T& a1 = angle_axis[1];
@@ -261,7 +274,7 @@ inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
}
}
template<typename T>
template <typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
const T& q1 = quaternion[1];
const T& q2 = quaternion[2];
@@ -288,9 +301,8 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
// = atan(-sin(theta), -cos(theta))
//
const T two_theta =
T(2.0) * ((cos_theta < 0.0)
? atan2(-sin_theta, -cos_theta)
: atan2(sin_theta, cos_theta));
T(2.0) * ((cos_theta < T(0.0)) ? atan2(-sin_theta, -cos_theta)
: atan2(sin_theta, cos_theta));
const T k = two_theta / sin_theta;
angle_axis[0] = q1 * k;
angle_axis[1] = q2 * k;
@@ -316,8 +328,7 @@ void RotationMatrixToQuaternion(const T* R, T* angle_axis) {
// Ken Shoemake, 1987 SIGGRAPH course notes
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* quaternion) {
const MatrixAdapter<const T, row_stride, col_stride>& R, T* quaternion) {
const T trace = R(0, 0) + R(1, 1) + R(2, 2);
if (trace >= 0.0) {
T t = sqrt(trace + T(1.0));
@@ -359,8 +370,7 @@ inline void RotationMatrixToAngleAxis(const T* R, T* angle_axis) {
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* angle_axis) {
const MatrixAdapter<const T, row_stride, col_stride>& R, T* angle_axis) {
T quaternion[4];
RotationMatrixToQuaternion(R, quaternion);
QuaternionToAngleAxis(quaternion, angle_axis);
@@ -374,8 +384,7 @@ inline void AngleAxisToRotationMatrix(const T* angle_axis, T* R) {
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
const T* angle_axis,
const MatrixAdapter<T, row_stride, col_stride>& R) {
const T* angle_axis, const MatrixAdapter<T, row_stride, col_stride>& R) {
static const T kOne = T(1.0);
const T theta2 = DotProduct(angle_axis, angle_axis);
if (theta2 > T(std::numeric_limits<double>::epsilon())) {
@@ -390,6 +399,7 @@ void AngleAxisToRotationMatrix(
const T costheta = cos(theta);
const T sintheta = sin(theta);
// clang-format off
R(0, 0) = costheta + wx*wx*(kOne - costheta);
R(1, 0) = wz*sintheta + wx*wy*(kOne - costheta);
R(2, 0) = -wy*sintheta + wx*wz*(kOne - costheta);
@@ -399,15 +409,16 @@ void AngleAxisToRotationMatrix(
R(0, 2) = wy*sintheta + wx*wz*(kOne - costheta);
R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta);
R(2, 2) = costheta + wz*wz*(kOne - costheta);
// clang-format on
} else {
// Near zero, we switch to using the first order Taylor expansion.
R(0, 0) = kOne;
R(1, 0) = angle_axis[2];
R(0, 0) = kOne;
R(1, 0) = angle_axis[2];
R(2, 0) = -angle_axis[1];
R(0, 1) = -angle_axis[2];
R(1, 1) = kOne;
R(2, 1) = angle_axis[0];
R(0, 2) = angle_axis[1];
R(1, 1) = kOne;
R(2, 1) = angle_axis[0];
R(0, 2) = angle_axis[1];
R(1, 2) = -angle_axis[0];
R(2, 2) = kOne;
}
@@ -422,8 +433,7 @@ inline void EulerAnglesToRotationMatrix(const T* euler,
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
const T* euler,
const MatrixAdapter<T, row_stride, col_stride>& R) {
const T* euler, const MatrixAdapter<T, row_stride, col_stride>& R) {
const double kPi = 3.14159265358979323846;
const T degrees_to_radians(kPi / 180.0);
@@ -438,28 +448,27 @@ void EulerAnglesToRotationMatrix(
const T c3 = cos(pitch);
const T s3 = sin(pitch);
R(0, 0) = c1*c2;
R(0, 1) = -s1*c3 + c1*s2*s3;
R(0, 2) = s1*s3 + c1*s2*c3;
R(0, 0) = c1 * c2;
R(0, 1) = -s1 * c3 + c1 * s2 * s3;
R(0, 2) = s1 * s3 + c1 * s2 * c3;
R(1, 0) = s1*c2;
R(1, 1) = c1*c3 + s1*s2*s3;
R(1, 2) = -c1*s3 + s1*s2*c3;
R(1, 0) = s1 * c2;
R(1, 1) = c1 * c3 + s1 * s2 * s3;
R(1, 2) = -c1 * s3 + s1 * s2 * c3;
R(2, 0) = -s2;
R(2, 1) = c2*s3;
R(2, 2) = c2*c3;
R(2, 1) = c2 * s3;
R(2, 2) = c2 * c3;
}
template <typename T> inline
void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
template <typename T>
inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
QuaternionToScaledRotation(q, RowMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride> inline
void QuaternionToScaledRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R) {
template <typename T, int row_stride, int col_stride>
inline void QuaternionToScaledRotation(
const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R) {
// Make convenient names for elements of q.
T a = q[0];
T b = q[1];
@@ -478,22 +487,24 @@ void QuaternionToScaledRotation(
T cd = c * d;
T dd = d * d;
R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd); // NOLINT
R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab); // NOLINT
R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd; // NOLINT
// clang-format off
R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd);
R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab);
R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd;
// clang-format on
}
template <typename T> inline
void QuaternionToRotation(const T q[4], T R[3 * 3]) {
template <typename T>
inline void QuaternionToRotation(const T q[4], T R[3 * 3]) {
QuaternionToRotation(q, RowMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride> inline
void QuaternionToRotation(const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R) {
template <typename T, int row_stride, int col_stride>
inline void QuaternionToRotation(
const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R) {
QuaternionToScaledRotation(q, R);
T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3];
T normalizer = q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3];
normalizer = T(1) / normalizer;
for (int i = 0; i < 3; ++i) {
@@ -503,8 +514,13 @@ void QuaternionToRotation(const T q[4],
}
}
template <typename T> inline
void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
template <typename T>
inline void UnitQuaternionRotatePoint(const T q[4],
const T pt[3],
T result[3]) {
DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
// clang-format off
const T t2 = q[0] * q[1];
const T t3 = q[0] * q[2];
const T t4 = q[0] * q[3];
@@ -517,50 +533,63 @@ void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]; // NOLINT
result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]; // NOLINT
result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT
// clang-format on
}
template <typename T> inline
void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
template <typename T>
inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
// 'scale' is 1 / norm(q).
const T scale = T(1) / sqrt(q[0] * q[0] +
q[1] * q[1] +
q[2] * q[2] +
q[3] * q[3]);
const T scale =
T(1) / sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);
// Make unit-norm version of q.
const T unit[4] = {
scale * q[0],
scale * q[1],
scale * q[2],
scale * q[3],
scale * q[0],
scale * q[1],
scale * q[2],
scale * q[3],
};
UnitQuaternionRotatePoint(unit, pt, result);
}
template<typename T> inline
void QuaternionProduct(const T z[4], const T w[4], T zw[4]) {
template <typename T>
inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]) {
DCHECK_NE(z, zw) << "Inplace quaternion product is not supported.";
DCHECK_NE(w, zw) << "Inplace quaternion product is not supported.";
// clang-format off
zw[0] = z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3];
zw[1] = z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2];
zw[2] = z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1];
zw[3] = z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0];
// clang-format on
}
// xy = x cross y;
template<typename T> inline
void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) {
template <typename T>
inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) {
DCHECK_NE(x, x_cross_y) << "Inplace cross product is not supported.";
DCHECK_NE(y, x_cross_y) << "Inplace cross product is not supported.";
x_cross_y[0] = x[1] * y[2] - x[2] * y[1];
x_cross_y[1] = x[2] * y[0] - x[0] * y[2];
x_cross_y[2] = x[0] * y[1] - x[1] * y[0];
}
template<typename T> inline
T DotProduct(const T x[3], const T y[3]) {
template <typename T>
inline T DotProduct(const T x[3], const T y[3]) {
return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2]);
}
template<typename T> inline
void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
template <typename T>
inline void AngleAxisRotatePoint(const T angle_axis[3],
const T pt[3],
T result[3]) {
DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
const T theta2 = DotProduct(angle_axis, angle_axis);
if (theta2 > T(std::numeric_limits<double>::epsilon())) {
// Away from zero, use the rodriguez formula
@@ -576,17 +605,17 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
const T theta = sqrt(theta2);
const T costheta = cos(theta);
const T sintheta = sin(theta);
const T theta_inverse = 1.0 / theta;
const T theta_inverse = T(1.0) / theta;
const T w[3] = { angle_axis[0] * theta_inverse,
angle_axis[1] * theta_inverse,
angle_axis[2] * theta_inverse };
const T w[3] = {angle_axis[0] * theta_inverse,
angle_axis[1] * theta_inverse,
angle_axis[2] * theta_inverse};
// Explicitly inlined evaluation of the cross product for
// performance reasons.
const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1],
w[2] * pt[0] - w[0] * pt[2],
w[0] * pt[1] - w[1] * pt[0] };
const T w_cross_pt[3] = {w[1] * pt[2] - w[2] * pt[1],
w[2] * pt[0] - w[0] * pt[2],
w[0] * pt[1] - w[1] * pt[0]};
const T tmp =
(w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta);
@@ -611,9 +640,9 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
//
// Explicitly inlined evaluation of the cross product for
// performance reasons.
const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
angle_axis[0] * pt[1] - angle_axis[1] * pt[0] };
const T w_cross_pt[3] = {angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
angle_axis[0] * pt[1] - angle_axis[1] * pt[0]};
result[0] = pt[0] + w_cross_pt[0];
result[1] = pt[1] + w_cross_pt[1];

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -38,55 +38,30 @@
#ifndef CERES_PUBLIC_SIZED_COST_FUNCTION_H_
#define CERES_PUBLIC_SIZED_COST_FUNCTION_H_
#include "ceres/types.h"
#include "ceres/cost_function.h"
#include "ceres/types.h"
#include "glog/logging.h"
#include "internal/parameter_dims.h"
namespace ceres {
template<int kNumResiduals,
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
template <int kNumResiduals, int... Ns>
class SizedCostFunction : public CostFunction {
public:
static_assert(kNumResiduals > 0 || kNumResiduals == DYNAMIC,
"Cost functions must have at least one residual block.");
static_assert(internal::StaticParameterDims<Ns...>::kIsValid,
"Invalid parameter block dimension detected. Each parameter "
"block dimension must be bigger than zero.");
using ParameterDims = internal::StaticParameterDims<Ns...>;
SizedCostFunction() {
CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC)
<< "Cost functions must have at least one residual block.";
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
set_num_residuals(kNumResiduals);
#define CERES_ADD_PARAMETER_BLOCK(N) \
if (N) mutable_parameter_block_sizes()->push_back(N);
CERES_ADD_PARAMETER_BLOCK(N0);
CERES_ADD_PARAMETER_BLOCK(N1);
CERES_ADD_PARAMETER_BLOCK(N2);
CERES_ADD_PARAMETER_BLOCK(N3);
CERES_ADD_PARAMETER_BLOCK(N4);
CERES_ADD_PARAMETER_BLOCK(N5);
CERES_ADD_PARAMETER_BLOCK(N6);
CERES_ADD_PARAMETER_BLOCK(N7);
CERES_ADD_PARAMETER_BLOCK(N8);
CERES_ADD_PARAMETER_BLOCK(N9);
#undef CERES_ADD_PARAMETER_BLOCK
*mutable_parameter_block_sizes() = std::vector<int32_t>{Ns...};
}
virtual ~SizedCostFunction() { }
virtual ~SizedCostFunction() {}
// Subclasses must implement Evaluate().
};

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,20 +32,21 @@
#define CERES_PUBLIC_SOLVER_H_
#include <cmath>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
namespace ceres {
class Problem;
// Interface for non-linear least squares solvers.
class CERES_EXPORT Solver {
public:
@@ -57,87 +58,6 @@ class CERES_EXPORT Solver {
//
// The constants are defined inside types.h
struct CERES_EXPORT Options {
// Default constructor that sets up a generic sparse problem.
Options() {
minimizer_type = TRUST_REGION;
line_search_direction_type = LBFGS;
line_search_type = WOLFE;
nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
max_lbfgs_rank = 20;
use_approximate_eigenvalue_bfgs_scaling = false;
line_search_interpolation_type = CUBIC;
min_line_search_step_size = 1e-9;
line_search_sufficient_function_decrease = 1e-4;
max_line_search_step_contraction = 1e-3;
min_line_search_step_contraction = 0.6;
max_num_line_search_step_size_iterations = 20;
max_num_line_search_direction_restarts = 5;
line_search_sufficient_curvature_decrease = 0.9;
max_line_search_step_expansion = 10.0;
trust_region_strategy_type = LEVENBERG_MARQUARDT;
dogleg_type = TRADITIONAL_DOGLEG;
use_nonmonotonic_steps = false;
max_consecutive_nonmonotonic_steps = 5;
max_num_iterations = 50;
max_solver_time_in_seconds = 1e9;
num_threads = 1;
initial_trust_region_radius = 1e4;
max_trust_region_radius = 1e16;
min_trust_region_radius = 1e-32;
min_relative_decrease = 1e-3;
min_lm_diagonal = 1e-6;
max_lm_diagonal = 1e32;
max_num_consecutive_invalid_steps = 5;
function_tolerance = 1e-6;
gradient_tolerance = 1e-10;
parameter_tolerance = 1e-8;
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) && !defined(CERES_ENABLE_LGPL_CODE) // NOLINT
linear_solver_type = DENSE_QR;
#else
linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif
preconditioner_type = JACOBI;
visibility_clustering_type = CANONICAL_VIEWS;
dense_linear_algebra_library_type = EIGEN;
// Choose a default sparse linear algebra library in the order:
//
// SUITE_SPARSE > CX_SPARSE > EIGEN_SPARSE > NO_SPARSE
sparse_linear_algebra_library_type = NO_SPARSE;
#if !defined(CERES_NO_SUITESPARSE)
sparse_linear_algebra_library_type = SUITE_SPARSE;
#else
#if !defined(CERES_NO_CXSPARSE)
sparse_linear_algebra_library_type = CX_SPARSE;
#else
#if defined(CERES_USE_EIGEN_SPARSE)
sparse_linear_algebra_library_type = EIGEN_SPARSE;
#endif
#endif
#endif
num_linear_solver_threads = 1;
use_explicit_schur_complement = false;
use_postordering = false;
dynamic_sparsity = false;
min_linear_solver_iterations = 0;
max_linear_solver_iterations = 500;
eta = 1e-1;
jacobi_scaling = true;
use_inner_iterations = false;
inner_iteration_tolerance = 1e-3;
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
trust_region_problem_dump_directory = "/tmp";
trust_region_problem_dump_format_type = TEXTFILE;
check_gradients = false;
gradient_check_relative_precision = 1e-8;
gradient_check_numeric_derivative_relative_step_size = 1e-6;
update_state_every_iteration = false;
}
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
@@ -157,7 +77,7 @@ class CERES_EXPORT Solver {
// exactly or inexactly.
//
// 2. The trust region approach approximates the objective
// function using using a model function (often a quadratic) over
// function using a model function (often a quadratic) over
// a subset of the search space known as the trust region. If the
// model function succeeds in minimizing the true objective
// function the trust region is expanded; conversely, otherwise it
@@ -168,11 +88,12 @@ class CERES_EXPORT Solver {
// trust region methods first choose a step size (the size of the
// trust region) and then a step direction while line search methods
// first choose a step direction and then a step size.
MinimizerType minimizer_type;
MinimizerType minimizer_type = TRUST_REGION;
LineSearchDirectionType line_search_direction_type;
LineSearchType line_search_type;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
LineSearchDirectionType line_search_direction_type = LBFGS;
LineSearchType line_search_type = WOLFE;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
FLETCHER_REEVES;
// The LBFGS hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
@@ -197,8 +118,8 @@ class CERES_EXPORT Solver {
// method, please see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
// Limited Storage". Mathematics of Computation 35 (151): 773782.
int max_lbfgs_rank;
// Limited Storage". Mathematics of Computation 35 (151): 773-782.
int max_lbfgs_rank = 20;
// As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS),
// the initial inverse Hessian approximation is taken to be the Identity.
@@ -220,18 +141,18 @@ class CERES_EXPORT Solver {
// Oren S.S., Self-scaling variable metric (SSVM) algorithms
// Part II: Implementation and experiments, Management Science,
// 20(5), 863-874, 1974.
bool use_approximate_eigenvalue_bfgs_scaling;
bool use_approximate_eigenvalue_bfgs_scaling = false;
// Degree of the polynomial used to approximate the objective
// function. Valid values are BISECTION, QUADRATIC and CUBIC.
//
// BISECTION corresponds to pure backtracking search with no
// interpolation.
LineSearchInterpolationType line_search_interpolation_type;
LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If during the line search, the step_size falls below this
// value, it is truncated to zero.
double min_line_search_step_size;
double min_line_search_step_size = 1e-9;
// Line search parameters.
@@ -245,7 +166,7 @@ class CERES_EXPORT Solver {
//
// f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
//
double line_search_sufficient_function_decrease;
double line_search_sufficient_function_decrease = 1e-4;
// In each iteration of the line search,
//
@@ -255,7 +176,7 @@ class CERES_EXPORT Solver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
double max_line_search_step_contraction;
double max_line_search_step_contraction = 1e-3;
// In each iteration of the line search,
//
@@ -265,19 +186,25 @@ class CERES_EXPORT Solver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
double min_line_search_step_contraction;
double min_line_search_step_contraction = 0.6;
// Maximum number of trial step size iterations during each line search,
// if a step size satisfying the search conditions cannot be found within
// this number of trials, the line search will terminate.
int max_num_line_search_step_size_iterations;
// Maximum number of trial step size iterations during each line
// search, if a step size satisfying the search conditions cannot
// be found within this number of trials, the line search will
// terminate.
// The minimum allowed value is 0 for trust region minimizer and 1
// otherwise. If 0 is specified for the trust region minimizer,
// then line search will not be used when solving constrained
// optimization problems.
int max_num_line_search_step_size_iterations = 20;
// Maximum number of restarts of the line search direction algorithm before
// terminating the optimization. Restarts of the line search direction
// algorithm occur when the current algorithm fails to produce a new descent
// direction. This typically indicates a numerical failure, or a breakdown
// in the validity of the approximations used.
int max_num_line_search_direction_restarts;
int max_num_line_search_direction_restarts = 5;
// The strong Wolfe conditions consist of the Armijo sufficient
// decrease condition, and an additional requirement that the
@@ -290,7 +217,7 @@ class CERES_EXPORT Solver {
//
// Where f() is the line search objective and f'() is the derivative
// of f w.r.t step_size (d f / d step_size).
double line_search_sufficient_curvature_decrease;
double line_search_sufficient_curvature_decrease = 0.9;
// During the bracketing phase of the Wolfe search, the step size is
// increased until either a point satisfying the Wolfe conditions is
@@ -301,12 +228,12 @@ class CERES_EXPORT Solver {
// new_step_size <= max_step_expansion * step_size.
//
// By definition for expansion, max_step_expansion > 1.0.
double max_line_search_step_expansion;
double max_line_search_step_expansion = 10.0;
TrustRegionStrategyType trust_region_strategy_type;
TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
// Type of dogleg strategy to use.
DoglegType dogleg_type;
DoglegType dogleg_type = TRADITIONAL_DOGLEG;
// The classical trust region methods are descent methods, in that
// they only accept a point if it strictly reduces the value of
@@ -317,7 +244,7 @@ class CERES_EXPORT Solver {
// in the value of the objective function.
//
// This is because allowing for non-decreasing objective function
// values in a princpled manner allows the algorithm to "jump over
// values in a principled manner allows the algorithm to "jump over
// boulders" as the method is not restricted to move into narrow
// valleys while preserving its convergence properties.
//
@@ -333,30 +260,30 @@ class CERES_EXPORT Solver {
// than the minimum value encountered over the course of the
// optimization, the final parameters returned to the user are the
// ones corresponding to the minimum cost over all iterations.
bool use_nonmonotonic_steps;
int max_consecutive_nonmonotonic_steps;
bool use_nonmonotonic_steps = false;
int max_consecutive_nonmonotonic_steps = 5;
// Maximum number of iterations for the minimizer to run for.
int max_num_iterations;
int max_num_iterations = 50;
// Maximum time for which the minimizer should run for.
double max_solver_time_in_seconds;
double max_solver_time_in_seconds = 1e9;
// Number of threads used by Ceres for evaluating the cost and
// jacobians.
int num_threads;
int num_threads = 1;
// Trust region minimizer settings.
double initial_trust_region_radius;
double max_trust_region_radius;
double initial_trust_region_radius = 1e4;
double max_trust_region_radius = 1e16;
// Minimizer terminates when the trust region radius becomes
// smaller than this value.
double min_trust_region_radius;
double min_trust_region_radius = 1e-32;
// Lower bound for the relative decrease before a step is
// accepted.
double min_relative_decrease;
double min_relative_decrease = 1e-3;
// For the Levenberg-Marquadt algorithm, the scaled diagonal of
// the normal equations J'J is used to control the size of the
@@ -365,46 +292,75 @@ class CERES_EXPORT Solver {
// fail. max_lm_diagonal and min_lm_diagonal, clamp the values of
// diag(J'J) from above and below. In the normal course of
// operation, the user should not have to modify these parameters.
double min_lm_diagonal;
double max_lm_diagonal;
double min_lm_diagonal = 1e-6;
double max_lm_diagonal = 1e32;
// Sometimes due to numerical conditioning problems or linear
// solver flakiness, the trust region strategy may return a
// numerically invalid step that can be fixed by reducing the
// trust region size. So the TrustRegionMinimizer allows for a few
// successive invalid steps before it declares NUMERICAL_FAILURE.
int max_num_consecutive_invalid_steps;
int max_num_consecutive_invalid_steps = 5;
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
//
double function_tolerance;
double function_tolerance = 1e-6;
// Minimizer terminates when
//
// max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
double gradient_tolerance;
double gradient_tolerance = 1e-10;
// Minimizer terminates when
//
// |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
//
double parameter_tolerance;
double parameter_tolerance = 1e-8;
// Linear least squares solver options -------------------------------------
LinearSolverType linear_solver_type;
LinearSolverType linear_solver_type =
#if defined(CERES_NO_SPARSE)
DENSE_QR;
#else
SPARSE_NORMAL_CHOLESKY;
#endif
// Type of preconditioner to use with the iterative linear solvers.
PreconditionerType preconditioner_type;
PreconditionerType preconditioner_type = JACOBI;
// Type of clustering algorithm to use for visibility based
// preconditioning. This option is used only when the
// preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
VisibilityClusteringType visibility_clustering_type;
VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
// Subset preconditioner is a preconditioner for problems with
// general sparsity. Given a subset of residual blocks of a
// problem, it uses the corresponding subset of the rows of the
// Jacobian to construct a preconditioner.
//
// Suppose the Jacobian J has been horizontally partitioned as
//
// J = [P]
// [Q]
//
// Where, Q is the set of rows corresponding to the residual
// blocks in residual_blocks_for_subset_preconditioner.
//
// The preconditioner is the inverse of the matrix Q'Q.
//
// Obviously, the efficacy of the preconditioner depends on how
// well the matrix Q approximates J'J, or how well the chosen
// residual blocks approximate the non-linear least squares
// problem.
//
// If Solver::Options::preconditioner_type == SUBSET, then
// residual_blocks_for_subset_preconditioner must be non-empty.
std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner;
// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
@@ -413,22 +369,28 @@ class CERES_EXPORT Solver {
// available.
//
// This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and
// DENSE_SCHUR solvers. For small to moderate sized probem EIGEN
// DENSE_SCHUR solvers. For small to moderate sized problem EIGEN
// is a fine choice but for large problems, an optimized LAPACK +
// BLAS implementation can make a substantial difference in
// performance.
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
// Ceres supports using multiple sparse linear algebra libraries
// for sparse matrix ordering and factorizations. Currently,
// SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
// whether they are linked into Ceres at build time.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
// Number of threads used by Ceres to solve the Newton
// step. Currently only the SPARSE_SCHUR solver is capable of
// using this setting.
int num_linear_solver_threads;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
#if !defined(CERES_NO_SUITESPARSE)
SUITE_SPARSE;
#elif defined(CERES_USE_EIGEN_SPARSE)
EIGEN_SPARSE;
#elif !defined(CERES_NO_CXSPARSE)
CX_SPARSE;
#elif !defined(CERES_NO_ACCELERATE_SPARSE)
ACCELERATE_SPARSE;
#else
NO_SPARSE;
#endif
// The order in which variables are eliminated in a linear solver
// can have a significant of impact on the efficiency and accuracy
@@ -456,7 +418,7 @@ class CERES_EXPORT Solver {
//
// Given such an ordering, Ceres ensures that the parameter blocks in
// the lowest numbered group are eliminated first, and then the
// parmeter blocks in the next lowest numbered group and so on. Within
// parameter blocks in the next lowest numbered group and so on. Within
// each group, Ceres is free to order the parameter blocks as it
// chooses.
//
@@ -496,13 +458,13 @@ class CERES_EXPORT Solver {
// the parameter blocks into two groups, one for the points and one
// for the cameras, where the group containing the points has an id
// smaller than the group containing cameras.
shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
std::shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
// Use an explicitly computed Schur complement matrix with
// ITERATIVE_SCHUR.
//
// By default this option is disabled and ITERATIVE_SCHUR
// evaluates evaluates matrix-vector products between the Schur
// evaluates matrix-vector products between the Schur
// complement and a vector implicitly by exploiting the algebraic
// expression for the Schur complement.
//
@@ -519,7 +481,7 @@ class CERES_EXPORT Solver {
//
// NOTE: This option can only be used with the SCHUR_JACOBI
// preconditioner.
bool use_explicit_schur_complement;
bool use_explicit_schur_complement = false;
// Sparse Cholesky factorization algorithms use a fill-reducing
// ordering to permute the columns of the Jacobian matrix. There
@@ -540,7 +502,7 @@ class CERES_EXPORT Solver {
// reordering algorithm which has slightly better runtime
// performance at the expense of an extra copy of the Jacobian
// matrix. Setting use_postordering to true enables this tradeoff.
bool use_postordering;
bool use_postordering = false;
// Some non-linear least squares problems are symbolically dense but
// numerically sparse. i.e. at any given state only a small number
@@ -554,8 +516,32 @@ class CERES_EXPORT Solver {
// then it is probably best to keep this false, otherwise it will
// likely lead to worse performance.
// This settings affects the SPARSE_NORMAL_CHOLESKY solver.
bool dynamic_sparsity;
// This settings only affects the SPARSE_NORMAL_CHOLESKY solver.
bool dynamic_sparsity = false;
// TODO(sameeragarwal): Further expand the documentation for the
// following two options.
// NOTE1: EXPERIMENTAL FEATURE, UNDER DEVELOPMENT, USE AT YOUR OWN RISK.
//
// If use_mixed_precision_solves is true, the Gauss-Newton matrix
// is computed in double precision, but its factorization is
// computed in single precision. This can result in significant
// time and memory savings at the cost of some accuracy in the
// Gauss-Newton step. Iterative refinement is used to recover some
// of this accuracy back.
//
// If use_mixed_precision_solves is true, we recommend setting
// max_num_refinement_iterations to 2-3.
//
// NOTE2: The following two options are currently only applicable
// if sparse_linear_algebra_library_type is EIGEN_SPARSE and
// linear_solver_type is SPARSE_NORMAL_CHOLESKY, or SPARSE_SCHUR.
bool use_mixed_precision_solves = false;
// Number steps of the iterative refinement process to run when
// computing the Gauss-Newton step.
int max_num_refinement_iterations = 0;
// Some non-linear least squares problems have additional
// structure in the way the parameter blocks interact that it is
@@ -583,7 +569,7 @@ class CERES_EXPORT Solver {
// known as Wiberg's algorithm.
//
// Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
// Problems, SIAM Reviews, 22(3), 1980) present an analyis of
// Problems, SIAM Reviews, 22(3), 1980) present an analysis of
// various algorithms for solving separable non-linear least
// squares problems and refer to "Variable Projection" as
// Algorithm I in their paper.
@@ -615,7 +601,7 @@ class CERES_EXPORT Solver {
// displays better convergence behaviour per iteration. Setting
// Solver::Options::num_threads to the maximum number possible is
// highly recommended.
bool use_inner_iterations;
bool use_inner_iterations = false;
// If inner_iterations is true, then the user has two choices.
//
@@ -627,7 +613,7 @@ class CERES_EXPORT Solver {
// the lower numbered groups are optimized before the higher
// number groups. Each group must be an independent set. Not
// all parameter blocks need to be present in the ordering.
shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
std::shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
// Generally speaking, inner iterations make significant progress
// in the early stages of the solve and then their contribution
@@ -638,17 +624,17 @@ class CERES_EXPORT Solver {
// inner iterations drops below inner_iteration_tolerance, the use
// of inner iterations in subsequent trust region minimizer
// iterations is disabled.
double inner_iteration_tolerance;
double inner_iteration_tolerance = 1e-3;
// Minimum number of iterations for which the linear solver should
// run, even if the convergence criterion is satisfied.
int min_linear_solver_iterations;
int min_linear_solver_iterations = 0;
// Maximum number of iterations for which the linear solver should
// run. If the solver does not converge in less than
// max_linear_solver_iterations, then it returns MAX_ITERATIONS,
// as its termination type.
int max_linear_solver_iterations;
int max_linear_solver_iterations = 500;
// Forcing sequence parameter. The truncated Newton solver uses
// this number to control the relative accuracy with which the
@@ -658,21 +644,21 @@ class CERES_EXPORT Solver {
// it to terminate the iterations when
//
// (Q_i - Q_{i-1})/Q_i < eta/i
double eta;
double eta = 1e-1;
// Normalize the jacobian using Jacobi scaling before calling
// the linear least squares solver.
bool jacobi_scaling;
bool jacobi_scaling = true;
// Logging options ---------------------------------------------------------
LoggingType logging_type;
LoggingType logging_type = PER_MINIMIZER_ITERATION;
// By default the Minimizer progress is logged to VLOG(1), which
// is sent to STDERR depending on the vlog level. If this flag is
// set to true, and logging_type is not SILENT, the logging output
// is sent to STDOUT.
bool minimizer_progress_to_stdout;
bool minimizer_progress_to_stdout = false;
// List of iterations at which the minimizer should dump the trust
// region problem. Useful for testing and benchmarking. If empty
@@ -683,8 +669,8 @@ class CERES_EXPORT Solver {
// non-empty if trust_region_minimizer_iterations_to_dump is
// non-empty and trust_region_problem_dump_format_type is not
// CONSOLE.
std::string trust_region_problem_dump_directory;
DumpFormatType trust_region_problem_dump_format_type;
std::string trust_region_problem_dump_directory = "/tmp";
DumpFormatType trust_region_problem_dump_format_type = TEXTFILE;
// Finite differences options ----------------------------------------------
@@ -694,12 +680,12 @@ class CERES_EXPORT Solver {
// etc), then also computing it using finite differences. The
// results are compared, and if they differ substantially, details
// are printed to the log.
bool check_gradients;
bool check_gradients = false;
// Relative precision to check for in the gradient checker. If the
// relative difference between an element in a jacobian exceeds
// this number, then the jacobian for that cost term is dumped.
double gradient_check_relative_precision;
double gradient_check_relative_precision = 1e-8;
// WARNING: This option only applies to the to the numeric
// differentiation used for checking the user provided derivatives
@@ -723,7 +709,7 @@ class CERES_EXPORT Solver {
//
// The finite differencing is done along each dimension. The
// reason to use a relative (rather than absolute) step size is
// that this way, numeric differentation works for functions where
// that this way, numeric differentiation works for functions where
// the arguments are typically large (e.g. 1e9) and when the
// values are small (e.g. 1e-5). It is possible to construct
// "torture cases" which break this finite difference heuristic,
@@ -733,14 +719,21 @@ class CERES_EXPORT Solver {
// theory a good choice is sqrt(eps) * x, which for doubles means
// about 1e-8 * x. However, I have found this number too
// optimistic. This number should be exposed for users to change.
double gradient_check_numeric_derivative_relative_step_size;
double gradient_check_numeric_derivative_relative_step_size = 1e-6;
// If true, the user's parameter blocks are updated at the end of
// every Minimizer iteration, otherwise they are updated when the
// Minimizer terminates. This is useful if, for example, the user
// wishes to visualize the state of the optimization every
// iteration.
bool update_state_every_iteration;
// If update_state_every_iteration is true, then Ceres Solver will
// guarantee that at the end of every iteration and before any
// user provided IterationCallback is called, the parameter blocks
// are updated to the current best solution found by the
// solver. Thus the IterationCallback can inspect the values of
// the parameter blocks for purposes of computation, visualization
// or termination.
// If update_state_every_iteration is false then there is no such
// guarantee, and user provided IterationCallbacks should not
// expect to look at the parameter blocks and interpret their
// values.
bool update_state_every_iteration = false;
// Callbacks that are executed at the end of each iteration of the
// Minimizer. An iteration may terminate midway, either due to
@@ -749,20 +742,18 @@ class CERES_EXPORT Solver {
// executed.
// Callbacks are executed in the order that they are specified in
// this vector. By default, parameter blocks are updated only at
// the end of the optimization, i.e when the Minimizer
// terminates. This behaviour is controlled by
// update_state_every_variable. If the user wishes to have access
// to the update parameter blocks when his/her callbacks are
// executed, then set update_state_every_iteration to true.
// this vector. By default, parameter blocks are updated only at the
// end of the optimization, i.e when the Minimizer terminates. This
// behaviour is controlled by update_state_every_iteration. If the
// user wishes to have access to the updated parameter blocks when
// his/her callbacks are executed, then set
// update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
std::vector<IterationCallback*> callbacks;
};
struct CERES_EXPORT Summary {
Summary();
// A brief one line description of the state of the solver after
// termination.
std::string BriefReport() const;
@@ -774,25 +765,25 @@ class CERES_EXPORT Solver {
bool IsSolutionUsable() const;
// Minimizer summary -------------------------------------------------
MinimizerType minimizer_type;
MinimizerType minimizer_type = TRUST_REGION;
TerminationType termination_type;
TerminationType termination_type = FAILURE;
// Reason why the solver terminated.
std::string message;
std::string message = "ceres::Solve was not called.";
// Cost of the problem (value of the objective function) before
// the optimization.
double initial_cost;
double initial_cost = -1.0;
// Cost of the problem (value of the objective function) after the
// optimization.
double final_cost;
double final_cost = -1.0;
// The part of the total cost that comes from residual blocks that
// were held fixed by the preprocessor because all the parameter
// blocks that they depend on were fixed.
double fixed_cost;
double fixed_cost = -1.0;
// IterationSummary for each minimizer iteration in order.
std::vector<IterationSummary> iterations;
@@ -801,22 +792,22 @@ class CERES_EXPORT Solver {
// accepted. Unless use_non_monotonic_steps is true this is also
// the number of steps in which the objective function value/cost
// went down.
int num_successful_steps;
int num_successful_steps = -1;
// Number of minimizer iterations in which the step was rejected
// either because it did not reduce the cost enough or the step
// was not numerically valid.
int num_unsuccessful_steps;
int num_unsuccessful_steps = -1;
// Number of times inner iterations were performed.
int num_inner_iteration_steps;
int num_inner_iteration_steps = -1;
// Total number of iterations inside the line search algorithm
// across all invocations. We call these iterations "steps" to
// distinguish them from the outer iterations of the line search
// and trust region minimizer algorithms which call the line
// search algorithm as a subroutine.
int num_line_search_steps;
int num_line_search_steps = -1;
// All times reported below are wall times.
@@ -824,31 +815,42 @@ class CERES_EXPORT Solver {
// occurs, Ceres performs a number of preprocessing steps. These
// include error checks, memory allocations, and reorderings. This
// time is accounted for as preprocessing time.
double preprocessor_time_in_seconds;
double preprocessor_time_in_seconds = -1.0;
// Time spent in the TrustRegionMinimizer.
double minimizer_time_in_seconds;
double minimizer_time_in_seconds = -1.0;
// After the Minimizer is finished, some time is spent in
// re-evaluating residuals etc. This time is accounted for in the
// postprocessor time.
double postprocessor_time_in_seconds;
double postprocessor_time_in_seconds = -1.0;
// Some total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
double total_time_in_seconds = -1.0;
// Time (in seconds) spent in the linear solver computing the
// trust region step.
double linear_solver_time_in_seconds;
double linear_solver_time_in_seconds = -1.0;
// Number of times the Newton step was computed by solving a
// linear system. This does not include linear solves used by
// inner iterations.
int num_linear_solves = -1;
// Time (in seconds) spent evaluating the residual vector.
double residual_evaluation_time_in_seconds;
double residual_evaluation_time_in_seconds = 1.0;
// Number of residual only evaluations.
int num_residual_evaluations = -1;
// Time (in seconds) spent evaluating the jacobian matrix.
double jacobian_evaluation_time_in_seconds;
double jacobian_evaluation_time_in_seconds = -1.0;
// Number of Jacobian (and residual) evaluations.
int num_jacobian_evaluations = -1;
// Time (in seconds) spent doing inner iterations.
double inner_iteration_time_in_seconds;
double inner_iteration_time_in_seconds = -1.0;
// Cumulative timing information for line searches performed as part of the
// solve. Note that in addition to the case when the Line Search minimizer
@@ -857,89 +859,89 @@ class CERES_EXPORT Solver {
// Time (in seconds) spent evaluating the univariate cost function as part
// of a line search.
double line_search_cost_evaluation_time_in_seconds;
double line_search_cost_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the gradient of the univariate cost
// function as part of a line search.
double line_search_gradient_evaluation_time_in_seconds;
double line_search_gradient_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
double line_search_polynomial_minimization_time_in_seconds;
double line_search_polynomial_minimization_time_in_seconds = -1.0;
// Total time (in seconds) spent performing line searches.
double line_search_total_time_in_seconds;
double line_search_total_time_in_seconds = -1.0;
// Number of parameter blocks in the problem.
int num_parameter_blocks;
int num_parameter_blocks = -1;
// Number of parameters in the probem.
int num_parameters;
// Number of parameters in the problem.
int num_parameters = -1;
// Dimension of the tangent space of the problem (or the number of
// columns in the Jacobian for the problem). This is different
// from num_parameters if a parameter block is associated with a
// LocalParameterization
int num_effective_parameters;
int num_effective_parameters = -1;
// Number of residual blocks in the problem.
int num_residual_blocks;
int num_residual_blocks = -1;
// Number of residuals in the problem.
int num_residuals;
int num_residuals = -1;
// Number of parameter blocks in the problem after the inactive
// and constant parameter blocks have been removed. A parameter
// block is inactive if no residual block refers to it.
int num_parameter_blocks_reduced;
int num_parameter_blocks_reduced = -1;
// Number of parameters in the reduced problem.
int num_parameters_reduced;
int num_parameters_reduced = -1;
// Dimension of the tangent space of the reduced problem (or the
// number of columns in the Jacobian for the reduced
// problem). This is different from num_parameters_reduced if a
// parameter block in the reduced problem is associated with a
// LocalParameterization.
int num_effective_parameters_reduced;
int num_effective_parameters_reduced = -1;
// Number of residual blocks in the reduced problem.
int num_residual_blocks_reduced;
int num_residual_blocks_reduced = -1;
// Number of residuals in the reduced problem.
int num_residuals_reduced;
int num_residuals_reduced = -1;
// Is the reduced problem bounds constrained.
bool is_constrained;
bool is_constrained = false;
// Number of threads specified by the user for Jacobian and
// residual evaluation.
int num_threads_given;
int num_threads_given = -1;
// Number of threads actually used by the solver for Jacobian and
// residual evaluation. This number is not equal to
// num_threads_given if OpenMP is not available.
int num_threads_used;
// Number of threads specified by the user for solving the trust
// region problem.
int num_linear_solver_threads_given;
// Number of threads actually used by the solver for solving the
// trust region problem. This number is not equal to
// num_threads_given if OpenMP is not available.
int num_linear_solver_threads_used;
int num_threads_used = -1;
// Type of the linear solver requested by the user.
LinearSolverType linear_solver_type_given;
LinearSolverType linear_solver_type_given =
#if defined(CERES_NO_SPARSE)
DENSE_QR;
#else
SPARSE_NORMAL_CHOLESKY;
#endif
// Type of the linear solver actually used. This may be different
// from linear_solver_type_given if Ceres determines that the
// problem structure is not compatible with the linear solver
// requested or if the linear solver requested by the user is not
// available, e.g. The user requested SPARSE_NORMAL_CHOLESKY but
// no sparse linear algebra library was available.
LinearSolverType linear_solver_type_used;
LinearSolverType linear_solver_type_used =
#if defined(CERES_NO_SPARSE)
DENSE_QR;
#else
SPARSE_NORMAL_CHOLESKY;
#endif
// Size of the elimination groups given by the user as hints to
// the linear solver.
@@ -953,15 +955,29 @@ class CERES_EXPORT Solver {
// parameter blocks.
std::vector<int> linear_solver_ordering_used;
// For Schur type linear solvers, this string describes the
// template specialization which was detected in the problem and
// should be used.
std::string schur_structure_given;
// This is the Schur template specialization that was actually
// instantiated and used. The reason this will be different from
// schur_structure_given is because the corresponding template
// specialization does not exist.
//
// Template specializations can be added to ceres by editing
// internal/ceres/generate_template_specializations.py
std::string schur_structure_used;
// True if the user asked for inner iterations to be used as part
// of the optimization.
bool inner_iterations_given;
bool inner_iterations_given = false;
// True if the user asked for inner iterations to be used as part
// of the optimization and the problem structure was such that
// they were actually performed. e.g., in a problem with just one
// parameter block, inner iterations are not performed.
bool inner_iterations_used;
bool inner_iterations_used = false;
// Size of the parameter groups given by the user for performing
// inner iterations.
@@ -976,57 +992,59 @@ class CERES_EXPORT Solver {
std::vector<int> inner_iteration_ordering_used;
// Type of the preconditioner requested by the user.
PreconditionerType preconditioner_type_given;
PreconditionerType preconditioner_type_given = IDENTITY;
// Type of the preconditioner actually used. This may be different
// from linear_solver_type_given if Ceres determines that the
// problem structure is not compatible with the linear solver
// requested or if the linear solver requested by the user is not
// available.
PreconditionerType preconditioner_type_used;
PreconditionerType preconditioner_type_used = IDENTITY;
// Type of clustering algorithm used for visibility based
// preconditioning. Only meaningful when the preconditioner_type
// is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
VisibilityClusteringType visibility_clustering_type;
VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
// Type of trust region strategy.
TrustRegionStrategyType trust_region_strategy_type;
TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
// Type of dogleg strategy used for solving the trust region
// problem.
DoglegType dogleg_type;
DoglegType dogleg_type = TRADITIONAL_DOGLEG;
// Type of the dense linear algebra library used.
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
// Type of the sparse linear algebra library used.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
NO_SPARSE;
// Type of line search direction used.
LineSearchDirectionType line_search_direction_type;
LineSearchDirectionType line_search_direction_type = LBFGS;
// Type of the line search algorithm used.
LineSearchType line_search_type;
LineSearchType line_search_type = WOLFE;
// When performing line search, the degree of the polynomial used
// to approximate the objective function.
LineSearchInterpolationType line_search_interpolation_type;
LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
// then this indicates the particular variant of non-linear
// conjugate gradient used.
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
FLETCHER_REEVES;
// If the type of the line search direction is LBFGS, then this
// indicates the rank of the Hessian approximation.
int max_lbfgs_rank;
int max_lbfgs_rank = -1;
};
// Once a least squares problem has been built, this function takes
// the problem and optimizes it based on the values of the options
// parameters. Upon return, a detailed summary of the work performed
// by the preprocessor, the non-linear minmizer and the linear
// by the preprocessor, the non-linear minimizer and the linear
// solver are reported in the summary object.
virtual void Solve(const Options& options,
Problem* problem,
@@ -1035,8 +1053,8 @@ class CERES_EXPORT Solver {
// Helper function which avoids going through the interface.
CERES_EXPORT void Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary);
Problem* problem,
Solver::Summary* summary);
} // namespace ceres

368
extern/ceres/include/ceres/tiny_solver.h vendored Normal file
View File

@@ -0,0 +1,368 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
//
// WARNING WARNING WARNING
// WARNING WARNING WARNING Tiny solver is experimental and will change.
// WARNING WARNING WARNING
//
// A tiny least squares solver using Levenberg-Marquardt, intended for solving
// small dense problems with low latency and low overhead. The implementation
// takes care to do all allocation up front, so that no memory is allocated
// during solving. This is especially useful when solving many similar problems;
// for example, inverse pixel distortion for every pixel on a grid.
//
// Note: This code has no dependencies beyond Eigen, including on other parts of
// Ceres, so it is possible to take this file alone and put it in another
// project without the rest of Ceres.
//
// Algorithm based off of:
//
// [1] K. Madsen, H. Nielsen, O. Tingleff.
// Methods for Non-linear Least Squares Problems.
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
#ifndef CERES_PUBLIC_TINY_SOLVER_H_
#define CERES_PUBLIC_TINY_SOLVER_H_
#include <cassert>
#include <cmath>
#include "Eigen/Dense"
namespace ceres {
// To use tiny solver, create a class or struct that allows computing the cost
// function (described below). This is similar to a ceres::CostFunction, but is
// different to enable statically allocating all memory for the solver
// (specifically, enum sizes). Key parts are the Scalar typedef, the enums to
// describe problem sizes (needed to remove all heap allocations), and the
// operator() overload to evaluate the cost and (optionally) jacobians.
//
// struct TinySolverCostFunctionTraits {
// typedef double Scalar;
// enum {
// NUM_RESIDUALS = <int> OR Eigen::Dynamic,
// NUM_PARAMETERS = <int> OR Eigen::Dynamic,
// };
// bool operator()(const double* parameters,
// double* residuals,
// double* jacobian) const;
//
// int NumResiduals() const; -- Needed if NUM_RESIDUALS == Eigen::Dynamic.
// int NumParameters() const; -- Needed if NUM_PARAMETERS == Eigen::Dynamic.
// };
//
// For operator(), the size of the objects is:
//
// double* parameters -- NUM_PARAMETERS or NumParameters()
// double* residuals -- NUM_RESIDUALS or NumResiduals()
// double* jacobian -- NUM_RESIDUALS * NUM_PARAMETERS in column-major format
// (Eigen's default); or NULL if no jacobian requested.
//
// An example (fully statically sized):
//
// struct MyCostFunctionExample {
// typedef double Scalar;
// enum {
// NUM_RESIDUALS = 2,
// NUM_PARAMETERS = 3,
// };
// bool operator()(const double* parameters,
// double* residuals,
// double* jacobian) const {
// residuals[0] = x + 2*y + 4*z;
// residuals[1] = y * z;
// if (jacobian) {
// jacobian[0 * 2 + 0] = 1; // First column (x).
// jacobian[0 * 2 + 1] = 0;
//
// jacobian[1 * 2 + 0] = 2; // Second column (y).
// jacobian[1 * 2 + 1] = z;
//
// jacobian[2 * 2 + 0] = 4; // Third column (z).
// jacobian[2 * 2 + 1] = y;
// }
// return true;
// }
// };
//
// The solver supports either statically or dynamically sized cost
// functions. If the number of residuals is dynamic then the Function
// must define:
//
// int NumResiduals() const;
//
// If the number of parameters is dynamic then the Function must
// define:
//
// int NumParameters() const;
//
// A tiny dense Levenberg-Marquardt solver. All work buffers are allocated
// up front in Initialize(), so no memory is allocated while solving; this
// makes it suitable for solving many small problems with low latency.
//
// Function supplies the Scalar typedef, the NUM_RESIDUALS/NUM_PARAMETERS
// enums (either fixed or Eigen::Dynamic plus NumResiduals()/NumParameters()
// accessors), and operator()(parameters, residuals, jacobian) as described
// in the header comment above.
template <typename Function,
          typename LinearSolver =
              Eigen::LDLT<Eigen::Matrix<typename Function::Scalar,
                                        Function::NUM_PARAMETERS,
                                        Function::NUM_PARAMETERS>>>
class TinySolver {
 public:
  // This class needs to have an Eigen aligned operator new as it contains
  // fixed-size Eigen types.
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  enum {
    NUM_RESIDUALS = Function::NUM_RESIDUALS,
    NUM_PARAMETERS = Function::NUM_PARAMETERS
  };
  typedef typename Function::Scalar Scalar;
  typedef typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1> Parameters;

  // Reason the solver terminated.
  enum Status {
    GRADIENT_TOO_SMALL,            // eps > max(J'*f(x))
    RELATIVE_STEP_SIZE_TOO_SMALL,  // eps > ||dx|| / (||x|| + eps)
    COST_TOO_SMALL,                // eps > ||f(x)||^2 / 2
    HIT_MAX_ITERATIONS,
    // TODO(sameeragarwal): Deal with numerical failures.
  };

  struct Options {
    Scalar gradient_tolerance = 1e-10;  // eps > max(J'*f(x))
    Scalar parameter_tolerance = 1e-8;  // eps > ||dx|| / ||x||
    // Solving stops when the current cost, 1/2 ||f(x)||^2, drops below this.
    Scalar cost_threshold =
        std::numeric_limits<Scalar>::epsilon();
    Scalar initial_trust_region_radius = 1e4;
    int max_num_iterations = 50;
  };

  struct Summary {
    Scalar initial_cost = -1;       // 1/2 ||f(x)||^2
    Scalar final_cost = -1;         // 1/2 ||f(x)||^2
    Scalar gradient_max_norm = -1;  // max(J'f(x))
    int iterations = -1;
    Status status = HIT_MAX_ITERATIONS;
  };

  // Evaluates the function at x and refreshes error_, jacobian_, jtj_, g_
  // and cost_. Returns false if the user's function evaluation fails.
  bool Update(const Function& function, const Parameters& x) {
    if (!function(x.data(), error_.data(), jacobian_.data())) {
      return false;
    }

    // Store -f(x); the normal equations below are then solved for a
    // descent step directly (g_ = J'(-f)).
    error_ = -error_;

    // On the first iteration, compute a diagonal (Jacobi) scaling
    // matrix, which we store as a vector.
    if (summary.iterations == 0) {
      // jacobi_scaling[i] = 1 / (1 + ||i-th column of J||)
      //
      // 1 is added to the denominator to regularize small diagonal
      // entries.
      jacobi_scaling_ = 1.0 / (1.0 + jacobian_.colwise().norm().array());
    }

    // This explicitly computes the normal equations, which is numerically
    // unstable. Nevertheless, it is often good enough and is fast.
    //
    // TODO(sameeragarwal): Refactor this to allow for DenseQR
    // factorization.
    jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal();
    jtj_ = jacobian_.transpose() * jacobian_;
    g_ = jacobian_.transpose() * error_;
    summary.gradient_max_norm = g_.array().abs().maxCoeff();
    cost_ = error_.squaredNorm() / 2;
    return true;
  }

  // Minimizes the function starting from *x_and_min; on return *x_and_min
  // holds the best parameters found and the returned Summary (a reference
  // to the member) describes the run.
  const Summary& Solve(const Function& function, Parameters* x_and_min) {
    Initialize<NUM_RESIDUALS, NUM_PARAMETERS>(function);
    assert(x_and_min);
    Parameters& x = *x_and_min;
    summary = Summary();
    summary.iterations = 0;

    // TODO(sameeragarwal): Deal with failure here.
    Update(function, x);
    summary.initial_cost = cost_;
    summary.final_cost = cost_;

    if (summary.gradient_max_norm < options.gradient_tolerance) {
      summary.status = GRADIENT_TOO_SMALL;
      return summary;
    }

    if (cost_ < options.cost_threshold) {
      summary.status = COST_TOO_SMALL;
      return summary;
    }

    // u is the LM damping (inverse trust region radius); v controls how
    // fast u grows when steps are rejected. See [1].
    Scalar u = 1.0 / options.initial_trust_region_radius;
    Scalar v = 2;

    for (summary.iterations = 1;
         summary.iterations < options.max_num_iterations;
         summary.iterations++) {
      jtj_regularized_ = jtj_;
      const Scalar min_diagonal = 1e-6;
      const Scalar max_diagonal = 1e32;
      for (int i = 0; i < lm_diagonal_.rows(); ++i) {
        // Clamp the J'J diagonal before damping so that extremely small or
        // large entries cannot destabilize the regularization.
        lm_diagonal_[i] = std::sqrt(
            u * std::min(std::max(jtj_(i, i), min_diagonal), max_diagonal));
        jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i];
      }

      // TODO(sameeragarwal): Check for failure and deal with it.
      linear_solver_.compute(jtj_regularized_);
      lm_step_ = linear_solver_.solve(g_);
      // Undo the Jacobi scaling to obtain the step in the original
      // parameter space.
      dx_ = jacobi_scaling_.asDiagonal() * lm_step_;

      // Adding parameter_tolerance to x.norm() ensures that this
      // works if x is near zero.
      const Scalar parameter_tolerance =
          options.parameter_tolerance *
          (x.norm() + options.parameter_tolerance);
      if (dx_.norm() < parameter_tolerance) {
        summary.status = RELATIVE_STEP_SIZE_TOO_SMALL;
        break;
      }
      x_new_ = x + dx_;

      // TODO(keir): Add proper handling of errors from user eval of cost
      // functions.
      function(&x_new_[0], &f_x_new_[0], NULL);

      const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm());

      // TODO(sameeragarwal): Better more numerically stable evaluation.
      const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_);

      // rho is the ratio of the actual reduction in error to the reduction
      // in error that would be obtained if the problem was linear. See [1]
      // for details.
      Scalar rho(cost_change / model_cost_change);
      if (rho > 0) {
        // Accept the Levenberg-Marquardt step because the linear
        // model fits well.
        x = x_new_;

        // TODO(sameeragarwal): Deal with failure.
        Update(function, x);
        if (summary.gradient_max_norm < options.gradient_tolerance) {
          summary.status = GRADIENT_TOO_SMALL;
          break;
        }

        if (cost_ < options.cost_threshold) {
          summary.status = COST_TOO_SMALL;
          break;
        }

        // Shrink the damping (grow the trust region) based on how well the
        // quadratic model predicted the actual cost change; see [1].
        Scalar tmp = Scalar(2 * rho - 1);
        u = u * std::max(1 / 3., 1 - tmp * tmp * tmp);
        v = 2;
        continue;
      }

      // Reject the update because either the normal equations failed to solve
      // or the local linear model was not good (rho < 0). Instead, increase u
      // to move closer to gradient descent.
      u *= v;
      v *= 2;
    }

    summary.final_cost = cost_;
    return summary;
  }

  Options options;
  Summary summary;

 private:
  // Preallocate everything, including temporary storage needed for solving the
  // linear system. This allows reusing the intermediate storage across solves.
  LinearSolver linear_solver_;
  Scalar cost_;
  Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_;
  Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> error_, f_x_new_;
  Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_;
  Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_;

  // The following definitions are needed for template metaprogramming.
  template <bool Condition, typename T>
  struct enable_if;
  template <typename T>
  struct enable_if<true, T> {
    typedef T type;
  };

  // The number of parameters and residuals are dynamically sized.
  template <int R, int P>
  typename enable_if<(R == Eigen::Dynamic && P == Eigen::Dynamic), void>::type
  Initialize(const Function& function) {
    Initialize(function.NumResiduals(), function.NumParameters());
  }

  // The number of residuals is dynamically sized and the number of
  // parameters is statically sized.
  template <int R, int P>
  typename enable_if<(R == Eigen::Dynamic && P != Eigen::Dynamic), void>::type
  Initialize(const Function& function) {
    Initialize(function.NumResiduals(), P);
  }

  // The number of residuals is statically sized and the number of
  // parameters is dynamically sized.
  template <int R, int P>
  typename enable_if<(R != Eigen::Dynamic && P == Eigen::Dynamic), void>::type
  Initialize(const Function& function) {
    Initialize(R, function.NumParameters());
  }

  // The number of parameters and residuals are statically sized.
  template <int R, int P>
  typename enable_if<(R != Eigen::Dynamic && P != Eigen::Dynamic), void>::type
  Initialize(const Function& /* function */) {}

  // Sizes every work buffer once up front; resize() is a no-op for
  // fixed-size Eigen types.
  void Initialize(int num_residuals, int num_parameters) {
    dx_.resize(num_parameters);
    x_new_.resize(num_parameters);
    g_.resize(num_parameters);
    jacobi_scaling_.resize(num_parameters);
    lm_diagonal_.resize(num_parameters);
    lm_step_.resize(num_parameters);
    error_.resize(num_residuals);
    f_x_new_.resize(num_residuals);
    jacobian_.resize(num_residuals, num_parameters);
    jtj_.resize(num_parameters, num_parameters);
    jtj_regularized_.resize(num_parameters, num_parameters);
  }
};
} // namespace ceres
#endif // CERES_PUBLIC_TINY_SOLVER_H_

View File

@@ -0,0 +1,206 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
//
// WARNING WARNING WARNING
// WARNING WARNING WARNING Tiny solver is experimental and will change.
// WARNING WARNING WARNING
#ifndef CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_
#define CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_
#include <memory>
#include <type_traits>
#include "Eigen/Core"
#include "ceres/jet.h"
#include "ceres/types.h" // For kImpossibleValue.
namespace ceres {
// An adapter around autodiff-style CostFunctors to enable easier use of
// TinySolver. See the example below showing how to use it:
//
// // Example for cost functor with static residual size.
// // Same as an autodiff cost functor, but taking only 1 parameter.
// struct MyFunctor {
// template<typename T>
// bool operator()(const T* const parameters, T* residuals) const {
// const T& x = parameters[0];
// const T& y = parameters[1];
// const T& z = parameters[2];
// residuals[0] = x + 2.*y + 4.*z;
// residuals[1] = y * z;
// return true;
// }
// };
//
// typedef TinySolverAutoDiffFunction<MyFunctor, 2, 3>
// AutoDiffFunction;
//
// MyFunctor my_functor;
// AutoDiffFunction f(my_functor);
//
// Vec3 x = ...;
// TinySolver<AutoDiffFunction> solver;
// solver.Solve(f, &x);
//
// // Example for cost functor with dynamic residual size.
// // NumResiduals() supplies dynamic size of residuals.
// // Same functionality as in tiny_solver.h but with autodiff.
// struct MyFunctorWithDynamicResiduals {
// int NumResiduals() const {
// return 2;
// }
//
// template<typename T>
// bool operator()(const T* const parameters, T* residuals) const {
// const T& x = parameters[0];
// const T& y = parameters[1];
// const T& z = parameters[2];
// residuals[0] = x + static_cast<T>(2.)*y + static_cast<T>(4.)*z;
// residuals[1] = y * z;
// return true;
// }
// };
//
// typedef TinySolverAutoDiffFunction<MyFunctorWithDynamicResiduals,
// Eigen::Dynamic,
// 3>
// AutoDiffFunctionWithDynamicResiduals;
//
// MyFunctorWithDynamicResiduals my_functor_dyn;
// AutoDiffFunctionWithDynamicResiduals f(my_functor_dyn);
//
// Vec3 x = ...;
// TinySolver<AutoDiffFunctionWithDynamicResiduals> solver;
// solver.Solve(f, &x);
//
// WARNING: The cost function adapter is not thread safe.
// Adapts an autodiff-style cost functor with a single parameter block so it
// can be used as the Function argument of TinySolver. Jacobians are computed
// with Jet-based forward-mode automatic differentiation.
//
// WARNING: Not thread safe -- the evaluation scratch buffers are members.
template <typename CostFunctor,
          int kNumResiduals,
          int kNumParameters,
          typename T = double>
class TinySolverAutoDiffFunction {
 public:
  // This class needs to have an Eigen aligned operator new as it contains
  // as a member a Jet type, which itself has a fixed-size Eigen type as member.
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  // Stores a reference to cost_functor (which must outlive this adapter)
  // and sizes the residual scratch buffer when kNumResiduals is dynamic.
  TinySolverAutoDiffFunction(const CostFunctor& cost_functor)
      : cost_functor_(cost_functor) {
    Initialize<kNumResiduals>(cost_functor);
  }

  typedef T Scalar;
  enum {
    NUM_PARAMETERS = kNumParameters,
    NUM_RESIDUALS = kNumResiduals,
  };

  // This is similar to AutoDifferentiate(), but since there is only one
  // parameter block it is easier to inline to avoid overhead.
  //
  // parameters: kNumParameters values (read).
  // residuals:  NumResiduals() values (written).
  // jacobian:   column-major NumResiduals() x kNumParameters block, or NULL
  //             when no derivatives are requested.
  bool operator()(const T* parameters, T* residuals, T* jacobian) const {
    if (jacobian == NULL) {
      // No jacobian requested, so just directly call the cost function with
      // doubles, skipping jets and derivatives.
      return cost_functor_(parameters, residuals);
    }

    // Initialize the input jets with passed parameters; parameter i gets
    // derivative direction i.
    for (int i = 0; i < kNumParameters; ++i) {
      jet_parameters_[i].a = parameters[i];  // Scalar part.
      jet_parameters_[i].v.setZero();        // Derivative part.
      jet_parameters_[i].v[i] = T(1.0);
    }

    // Initialize the output jets such that we can detect user errors.
    for (int i = 0; i < num_residuals_; ++i) {
      jet_residuals_[i].a = kImpossibleValue;
      jet_residuals_[i].v.setConstant(kImpossibleValue);
    }

    // Execute the cost function, but with jets to find the derivative.
    if (!cost_functor_(jet_parameters_, jet_residuals_.data())) {
      return false;
    }

    // Copy the jacobian out of the derivative part of the residual jets.
    Eigen::Map<Eigen::Matrix<T, kNumResiduals, kNumParameters>> jacobian_matrix(
        jacobian, num_residuals_, kNumParameters);
    for (int r = 0; r < num_residuals_; ++r) {
      residuals[r] = jet_residuals_[r].a;
      // Note that while this looks like a fast vectorized write, in practice it
      // unfortunately thrashes the cache since the writes to the column-major
      // jacobian are strided (e.g. rows are non-contiguous).
      jacobian_matrix.row(r) = jet_residuals_[r].v;
    }
    return true;
  }

  int NumResiduals() const {
    return num_residuals_;  // Set by Initialize.
  }

 private:
  const CostFunctor& cost_functor_;

  // The number of residuals at runtime.
  // This will be overridden if NUM_RESIDUALS == Eigen::Dynamic.
  int num_residuals_ = kNumResiduals;

  // To evaluate the cost function with jets, temporary storage is needed. These
  // are the buffers that are used during evaluation; parameters for the input,
  // and jet_residuals_ are where the final cost and derivatives end up.
  //
  // Since this buffer is used for evaluation, the adapter is not thread safe.
  using JetType = Jet<T, kNumParameters>;
  mutable JetType jet_parameters_[kNumParameters];
  // Eigen::Matrix serves as static or dynamic container.
  mutable Eigen::Matrix<JetType, kNumResiduals, 1> jet_residuals_;

  // The number of residuals is dynamically sized and the number of
  // parameters is statically sized.
  template <int R>
  typename std::enable_if<(R == Eigen::Dynamic), void>::type Initialize(
      const CostFunctor& function) {
    jet_residuals_.resize(function.NumResiduals());
    num_residuals_ = function.NumResiduals();
  }

  // The number of parameters and residuals are statically sized.
  template <int R>
  typename std::enable_if<(R != Eigen::Dynamic), void>::type Initialize(
      const CostFunctor& /* function */) {
    num_residuals_ = kNumResiduals;
  }
};
} // namespace ceres
#endif // CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_

View File

@@ -0,0 +1,142 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_
#define CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_
#include "Eigen/Core"
#include "ceres/cost_function.h"
#include "glog/logging.h"
namespace ceres {
// An adapter class that lets users of TinySolver use
// ceres::CostFunction objects that have exactly one parameter block.
//
// The adapter allows for the number of residuals and the size of the
// parameter block to be specified at compile or run-time.
//
// WARNING: This object is not thread-safe.
//
// Example usage:
//
// CostFunction* cost_function = ...
//
// Number of residuals and parameter block size known at compile time:
//
// TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters>
// cost_function_adapter(*cost_function);
//
// Number of residuals known at compile time and the parameter block
// size not known at compile time.
//
// TinySolverCostFunctionAdapter<kNumResiduals, Eigen::Dynamic>
// cost_function_adapter(*cost_function);
//
// Number of residuals not known at compile time and the parameter
// block size known at compile time.
//
// TinySolverCostFunctionAdapter<Eigen::Dynamic, kParameterBlockSize>
// cost_function_adapter(*cost_function);
//
// Number of residuals not known at compile time and the parameter
// block size not known at compile time.
//
// TinySolverCostFunctionAdapter cost_function_adapter(*cost_function);
//
// Wraps a ceres::CostFunction with exactly one parameter block so it can be
// used as the Function argument of TinySolver. Residual/parameter counts can
// be fixed at compile time or left Eigen::Dynamic and taken from the wrapped
// CostFunction at run time.
//
// WARNING: Not thread safe -- the row-major Jacobian scratch buffer is a
// member that is written during evaluation.
template <int kNumResiduals = Eigen::Dynamic,
          int kNumParameters = Eigen::Dynamic>
class TinySolverCostFunctionAdapter {
 public:
  typedef double Scalar;
  enum ComponentSizeType {
    NUM_PARAMETERS = kNumParameters,
    NUM_RESIDUALS = kNumResiduals
  };

  // This struct needs to have an Eigen aligned operator new as it contains
  // fixed-size Eigen types.
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  // Stores a reference to cost_function (which must outlive this adapter),
  // validates any compile-time sizes against it, and allocates the scratch
  // Jacobian when either dimension is dynamic.
  TinySolverCostFunctionAdapter(const CostFunction& cost_function)
      : cost_function_(cost_function) {
    CHECK_EQ(cost_function_.parameter_block_sizes().size(), 1)
        << "Only CostFunctions with exactly one parameter blocks are allowed.";

    const int num_parameters = cost_function_.parameter_block_sizes()[0];
    const int num_residuals = cost_function_.num_residuals();
    const bool has_dynamic_extent =
        (NUM_PARAMETERS == Eigen::Dynamic) || (NUM_RESIDUALS == Eigen::Dynamic);
    if (!has_dynamic_extent) {
      // Fully static sizes: the scratch Jacobian already has the right shape.
      return;
    }
    // Any compile-time size must agree with the wrapped CostFunction.
    if (NUM_RESIDUALS != Eigen::Dynamic) {
      CHECK_EQ(num_residuals, NUM_RESIDUALS);
    }
    if (NUM_PARAMETERS != Eigen::Dynamic) {
      CHECK_EQ(num_parameters, NUM_PARAMETERS);
    }
    row_major_jacobian_.resize(num_residuals, num_parameters);
  }

  // Evaluates the wrapped CostFunction. jacobian, when non-NULL, receives the
  // Jacobian in the column-major layout TinySolver expects.
  bool operator()(const double* parameters,
                  double* residuals,
                  double* jacobian) const {
    if (!jacobian) {
      return cost_function_.Evaluate(&parameters, residuals, NULL);
    }

    double* jacobian_blocks[1] = {row_major_jacobian_.data()};
    const bool evaluated =
        cost_function_.Evaluate(&parameters, residuals, jacobian_blocks);
    if (!evaluated) {
      return false;
    }

    // The Function object used by TinySolver takes its Jacobian in a
    // column-major layout, while CostFunction objects produce row-major
    // Jacobians; copying through the Eigen::Map performs the conversion.
    Eigen::Map<Eigen::Matrix<double, NUM_RESIDUALS, NUM_PARAMETERS>>
        col_major_jacobian(jacobian, NumResiduals(), NumParameters());
    col_major_jacobian = row_major_jacobian_;
    return true;
  }

  int NumResiduals() const { return cost_function_.num_residuals(); }
  int NumParameters() const {
    return cost_function_.parameter_block_sizes()[0];
  }

 private:
  const CostFunction& cost_function_;
  // Scratch buffer for the row-major Jacobian produced by cost_function_.
  mutable Eigen::Matrix<double, NUM_RESIDUALS, NUM_PARAMETERS, Eigen::RowMajor>
      row_major_jacobian_;
};
} // namespace ceres
#endif // CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -39,15 +39,11 @@
#include <string>
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
namespace ceres {
// Basic integer types. These typedefs are in the Ceres namespace to avoid
// conflicts with other packages having similar typedefs.
typedef int int32;
// Argument type used in interfaces that can optionally take ownership
// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
// object takes ownership of the pointer argument, and will call
@@ -116,10 +112,29 @@ enum PreconditionerType {
// the scene to determine the sparsity structure of the
// preconditioner. This is done using a clustering algorithm. The
// available visibility clustering algorithms are described below.
//
// Note: Requires SuiteSparse.
CLUSTER_JACOBI,
CLUSTER_TRIDIAGONAL
CLUSTER_TRIDIAGONAL,
// Subset preconditioner is a general purpose preconditioner for
// linear least squares problems. Given a set of residual blocks,
// it uses the corresponding subset of the rows of the Jacobian to
// construct a preconditioner.
//
// Suppose the Jacobian J has been horizontally partitioned as
//
// J = [P]
// [Q]
//
// Where, Q is the set of rows corresponding to the residual
// blocks in residual_blocks_for_subset_preconditioner.
//
// The preconditioner is the inverse of the matrix Q'Q.
//
// Obviously, the efficacy of the preconditioner depends on how
// well the matrix Q approximates J'J, or how well the chosen
// residual blocks approximate the non-linear least squares
// problem.
SUBSET,
};
enum VisibilityClusteringType {
@@ -150,7 +165,7 @@ enum SparseLinearAlgebraLibraryType {
// minimum degree ordering.
SUITE_SPARSE,
// A lightweight replacment for SuiteSparse, which does not require
// A lightweight replacement for SuiteSparse, which does not require
// a LAPACK/BLAS implementation. Consequently, its performance is
// also a bit lower than SuiteSparse.
CX_SPARSE,
@@ -159,6 +174,9 @@ enum SparseLinearAlgebraLibraryType {
// the Simplicial LDLT routines.
EIGEN_SPARSE,
// Apple's Accelerate framework sparse linear algebra routines.
ACCELERATE_SPARSE,
// No sparse linear solver should be used. This does not necessarily
// imply that Ceres was built without any sparse library, although that
// is the likely use case, merely that one should not be used.
@@ -202,7 +220,7 @@ enum LineSearchDirectionType {
// symmetric matrix but only N conditions are specified by the Secant
// equation. The requirement that the Hessian approximation be positive
// definite imposes another N additional constraints, but that still leaves
// remaining degrees-of-freedom. (L)BFGS methods uniquely deteremine the
// remaining degrees-of-freedom. (L)BFGS methods uniquely determine the
// approximate Hessian by imposing the additional constraints that the
// approximation at the next iteration must be the 'closest' to the current
// approximation (the nature of how this proximity is measured is actually
@@ -222,26 +240,26 @@ enum LineSearchDirectionType {
// For more details on BFGS see:
//
// Broyden, C.G., "The Convergence of a Class of Double-rank Minimization
// Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 7690, 1970.
// Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 76-90, 1970.
//
// Fletcher, R., "A New Approach to Variable Metric Algorithms,"
// Computer Journal, Vol. 13, pp 317322, 1970.
// Computer Journal, Vol. 13, pp 317-322, 1970.
//
// Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational
// Means," Mathematics of Computing, Vol. 24, pp 2326, 1970.
// Means," Mathematics of Computing, Vol. 24, pp 23-26, 1970.
//
// Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function
// Minimization," Mathematics of Computing, Vol. 24, pp 647656, 1970.
// Minimization," Mathematics of Computing, Vol. 24, pp 647-656, 1970.
//
// For more details on L-BFGS see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
// Storage". Mathematics of Computation 35 (151): 773782.
// Storage". Mathematics of Computation 35 (151): 773-782.
//
// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
// "Representations of Quasi-Newton Matrices and their use in
// Limited Memory Methods". Mathematical Programming 63 (4):
// 129156.
// 129-156.
//
// A general reference for both methods:
//
@@ -250,7 +268,7 @@ enum LineSearchDirectionType {
BFGS,
};
// Nonliner conjugate gradient methods are a generalization of the
// Nonlinear conjugate gradient methods are a generalization of the
// method of Conjugate Gradients for linear systems. The
// generalization can be carried out in a number of different ways
// leading to number of different rules for computing the search
@@ -420,10 +438,16 @@ enum LineSearchInterpolationType {
enum CovarianceAlgorithmType {
DENSE_SVD,
SUITE_SPARSE_QR,
EIGEN_SPARSE_QR
SPARSE_QR,
};
// It is a near impossibility that user code generates this exact
// value in normal operation, thus we will use it to fill arrays
// before passing them to user code. If on return an element of the
// array still contains this value, we will assume that the user code
// did not write to that memory location.
const double kImpossibleValue = 1e302;
CERES_EXPORT const char* LinearSolverTypeToString(
LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(std::string value,
@@ -493,6 +517,13 @@ CERES_EXPORT bool StringToNumericDiffMethodType(
std::string value,
NumericDiffMethodType* type);
CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);
CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value, DumpFormatType* type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value, LoggingType* type);
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
CERES_EXPORT bool IsSchurType(LinearSolverType type);

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
#ifndef CERES_PUBLIC_VERSION_H_
#define CERES_PUBLIC_VERSION_H_
#define CERES_VERSION_MAJOR 1
#define CERES_VERSION_MINOR 12
#define CERES_VERSION_MAJOR 2
#define CERES_VERSION_MINOR 0
#define CERES_VERSION_REVISION 0
// Classic CPP stringification; the extra level of indirection allows the

View File

@@ -0,0 +1,289 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: alexs.mac@gmail.com (Alex Stewart)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_ACCELERATE_SPARSE
#include "ceres/accelerate_sparse.h"
#include <algorithm>
#include <string>
#include <vector>
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
#define CASESTR(x) case x: return #x
namespace ceres {
namespace internal {
namespace {
// Returns a human-readable name for the given Accelerate sparse status code.
// Used only to build diagnostic/error messages; the returned string is a
// static literal and never needs freeing.
const char* SparseStatusToString(SparseStatus_t status) {
  switch (status) {
    CASESTR(SparseStatusOK);
    CASESTR(SparseFactorizationFailed);
    CASESTR(SparseMatrixIsSingular);
    CASESTR(SparseInternalError);
    CASESTR(SparseParameterError);
    CASESTR(SparseStatusReleased);
    default:
      // Fixed typo: previously returned "UKNOWN".
      return "UNKNOWN";
  }
}
} // namespace.
// Returns a pointer to a buffer of at least required_size bytes, aligned to
// kAccelerateRequiredAlignment, carved out of *workspace (which is resized as
// needed). The returned pointer aliases workspace's storage and is therefore
// only valid until the next resize of *workspace.
void* ResizeForAccelerateAlignment(const size_t required_size,
                                   std::vector<uint8_t> *workspace) {
  // Per the Accelerate documentation, all workspace memory passed to the
  // sparse solver functions must be 16-byte aligned.
  constexpr int kAccelerateRequiredAlignment = 16;
  // malloc() on macOS should always be 16-byte aligned, but it is unclear
  // whether the same holds for new(), or on other Apple OSs (phoneOS,
  // watchOS etc). Assume it does not: over-allocate by the alignment and use
  // std::align() to locate a (potentially offset) 16-byte aligned sub-buffer
  // of the requested size within workspace.
  workspace->resize(required_size + kAccelerateRequiredAlignment);
  void* aligned_start = workspace->data();
  size_t bytes_available = workspace->size();
  aligned_start = std::align(kAccelerateRequiredAlignment,
                             required_size,
                             aligned_start,
                             bytes_available);
  CHECK(aligned_start != nullptr)
      << "required_size: " << required_size
      << ", workspace size: " << workspace->size();
  return aligned_start;
}
// Solves the system represented by numeric_factor in place: on entry
// rhs_and_solution holds the right-hand side, on return it holds the
// solution. The solve scratch buffer (solve_workspace_) is grown as needed
// and reused across calls.
template<typename Scalar>
void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
DenseVector* rhs_and_solution) {
// From SparseSolve() documentation in Solve.h
const int required_size =
numeric_factor->solveWorkspaceRequiredStatic +
numeric_factor->solveWorkspaceRequiredPerRHS;
SparseSolve(*numeric_factor, *rhs_and_solution,
ResizeForAccelerateAlignment(required_size, &solve_workspace_));
}
// Builds an Accelerate view of A^T without converting the storage format.
//
// The returned matrix aliases A's column-index array (and, for
// Scalar == double, A's values array) as well as this object's
// column_starts_ / values_ members; it is only valid while both A and this
// AccelerateSparse instance are alive and unmodified.
template<typename Scalar>
typename AccelerateSparse<Scalar>::ASSparseMatrix
AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
CompressedRowSparseMatrix* A) {
// Accelerate uses CSC as its sparse storage format whereas Ceres uses CSR.
// As this method returns the transpose view we can flip rows/cols to map
// from CSR to CSC^T.
//
// Accelerate's columnStarts is a long*, not an int*. These types might be
// different (e.g. ARM on iOS) so always make a copy.
column_starts_.resize(A->num_rows() +1); // +1 for final column length.
std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
ASSparseMatrix At;
// Flipped row/col counts: At is the transpose view of A.
At.structure.rowCount = A->num_cols();
At.structure.columnCount = A->num_rows();
At.structure.columnStarts = &column_starts_[0];
At.structure.rowIndices = A->mutable_cols();
At.structure.attributes.transpose = false;
At.structure.attributes.triangle = SparseUpperTriangle;
At.structure.attributes.kind = SparseSymmetric;
At.structure.attributes._reserved = 0;
At.structure.attributes._allocatedBySparse = 0;
At.structure.blockSize = 1;
if (std::is_same<Scalar, double>::value) {
// Same scalar type: point directly at A's values, no copy.
At.data = reinterpret_cast<Scalar*>(A->mutable_values());
} else {
// Scalar != double: cast-copy A's values into our own buffer.
values_ =
ConstVectorRef(A->values(), A->num_nonzeros()).template cast<Scalar>();
At.data = values_.data();
}
return At;
}
// Performs the symbolic (structure-only) analysis of A for a Cholesky
// factorization. The result depends only on A's sparsity pattern and can be
// reused for any matrix sharing that pattern.
template<typename Scalar>
typename AccelerateSparse<Scalar>::SymbolicFactorization
AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
return SparseFactor(SparseFactorizationCholesky, A->structure);
}
// Computes a fresh numeric Cholesky factorization of A using the previously
// computed symbolic factorization (allocates new factor storage).
template<typename Scalar>
typename AccelerateSparse<Scalar>::NumericFactorization
AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
SymbolicFactorization* symbolic_factor) {
return SparseFactor(*symbolic_factor, *A);
}
// Re-runs the numeric factorization of A in place, reusing the storage of
// numeric_factor from a previous matrix with the same sparsity pattern.
// The factorization scratch buffer (factorization_workspace_) is grown as
// needed and reused across calls.
template<typename Scalar>
void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
NumericFactorization* numeric_factor) {
// From SparseRefactor() documentation in Solve.h
const int required_size = std::is_same<Scalar, double>::value
? numeric_factor->symbolicFactorization.workspaceSize_Double
: numeric_factor->symbolicFactorization.workspaceSize_Float;
return SparseRefactor(*A, numeric_factor,
ResizeForAccelerateAlignment(required_size,
&factorization_workspace_));
}
// Instantiate only for the specific template types required/supported s/t the
// definition can be in the .cc file.
template class AccelerateSparse<double>;
template class AccelerateSparse<float>;
// Factory returning an AppleAccelerateCholesky wrapped in the base-class
// interface. Uses a raw new because the constructor is private, so
// std::make_unique cannot reach it.
template<typename Scalar>
std::unique_ptr<SparseCholesky>
AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
return std::unique_ptr<SparseCholesky>(
new AppleAccelerateCholesky<Scalar>(ordering_type));
}
// Stores the requested ordering; factorization state starts out empty and is
// created lazily in Factorize().
template<typename Scalar>
AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
const OrderingType ordering_type)
: ordering_type_(ordering_type) {}
// Releases the Accelerate-owned symbolic and numeric factorization resources
// before the unique_ptr members are destroyed.
template<typename Scalar>
AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
FreeSymbolicFactorization();
FreeNumericFactorization();
}
// The input lhs must be lower-triangular CSR; CreateSparseMatrixTransposeView
// then presents it to Accelerate as an upper-triangular CSC transpose view.
template<typename Scalar>
CompressedRowSparseMatrix::StorageType
AppleAccelerateCholesky<Scalar>::StorageType() const {
return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
}
// Computes (or updates) the Cholesky factorization of lhs.
//
// The symbolic analysis is performed once, on the first call, and cached for
// all subsequent calls; the numeric factorization storage is likewise
// recycled across calls via the refactor path.
//
// lhs must use the storage type reported by StorageType(). On failure a
// description is written to *message and an error code is returned.
template<typename Scalar>
LinearSolverTerminationType
AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
                                           std::string* message) {
  // Validate the pointer before touching *lhs: the previous code invoked
  // lhs->storage_type() ahead of this check, dereferencing a NULL input.
  if (lhs == NULL) {
    *message = "Failure: Input lhs is NULL.";
    return LINEAR_SOLVER_FATAL_ERROR;
  }
  CHECK_EQ(lhs->storage_type(), StorageType());

  typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
      as_.CreateSparseMatrixTransposeView(lhs);

  if (!symbolic_factor_) {
    // First call: run the (pattern-only) symbolic analysis and cache it.
    symbolic_factor_.reset(
        new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
            as_.AnalyzeCholesky(&as_lhs)));
    if (symbolic_factor_->status != SparseStatusOK) {
      *message = StringPrintf(
          "Apple Accelerate Failure : Symbolic factorisation failed: %s",
          SparseStatusToString(symbolic_factor_->status));
      FreeSymbolicFactorization();
      return LINEAR_SOLVER_FATAL_ERROR;
    }
  }

  if (!numeric_factor_) {
    // First numeric factorization: allocate new factor storage.
    numeric_factor_.reset(
        new typename SparseTypesTrait<Scalar>::NumericFactorization(
            as_.Cholesky(&as_lhs, symbolic_factor_.get())));
  } else {
    // Recycle memory from previous numeric factorization.
    as_.Cholesky(&as_lhs, numeric_factor_.get());
  }
  if (numeric_factor_->status != SparseStatusOK) {
    *message = StringPrintf(
        "Apple Accelerate Failure : Numeric factorisation failed: %s",
        SparseStatusToString(numeric_factor_->status));
    FreeNumericFactorization();
    return LINEAR_SOLVER_FAILURE;
  }
  return LINEAR_SOLVER_SUCCESS;
}
// Solves the previously factorized system for the given right-hand side,
// writing the result into solution. rhs and solution must each hold
// columnCount doubles. Requires a prior successful Factorize().
template<typename Scalar>
LinearSolverTerminationType
AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
                                       double* solution,
                                       std::string* message) {
  // Guard against Solve() being invoked before any Factorize(): without this
  // check the CHECK_EQ below would dereference a null numeric_factor_ —
  // exactly the misuse its message describes.
  CHECK(numeric_factor_ != nullptr)
      << "Solve called without a call to Factorize first.";
  CHECK_EQ(numeric_factor_->status, SparseStatusOK)
      << "Solve called without a call to Factorize first ("
      << SparseStatusToString(numeric_factor_->status) << ").";
  const int num_cols = numeric_factor_->symbolicFactorization.columnCount;

  typename SparseTypesTrait<Scalar>::DenseVector as_rhs_and_solution;
  as_rhs_and_solution.count = num_cols;
  if (std::is_same<Scalar, double>::value) {
    // Accelerate solves in place, so seed the output buffer with the rhs and
    // hand it to the solver directly — no copy of the result needed.
    as_rhs_and_solution.data = reinterpret_cast<Scalar*>(solution);
    std::copy_n(rhs, num_cols, solution);
  } else {
    // Scalar != double: cast-copy the rhs into a Scalar-typed buffer.
    scalar_rhs_and_solution_ =
        ConstVectorRef(rhs, num_cols).template cast<Scalar>();
    as_rhs_and_solution.data = scalar_rhs_and_solution_.data();
  }
  as_.Solve(numeric_factor_.get(), &as_rhs_and_solution);
  if (!std::is_same<Scalar, double>::value) {
    // Cast the reduced-precision solution back to double for the caller.
    VectorRef(solution, num_cols) =
        scalar_rhs_and_solution_.template cast<double>();
  }
  return LINEAR_SOLVER_SUCCESS;
}
// Releases the Accelerate-side resources of the cached symbolic
// factorization (if any) and drops our handle to it.
template<typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
  if (!symbolic_factor_) {
    return;
  }
  SparseCleanup(*symbolic_factor_);
  symbolic_factor_.reset();
}
// Releases the Accelerate-side resources of the cached numeric
// factorization (if any) and drops our handle to it.
template<typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
  if (!numeric_factor_) {
    return;
  }
  SparseCleanup(*numeric_factor_);
  numeric_factor_.reset();
}
// Instantiate only for the specific template types required/supported s/t the
// definition can be in the .cc file.
template class AppleAccelerateCholesky<double>;
template class AppleAccelerateCholesky<float>;
}
}
#endif // CERES_NO_ACCELERATE_SPARSE

View File

@@ -0,0 +1,147 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: alexs.mac@gmail.com (Alex Stewart)
#ifndef CERES_INTERNAL_ACCELERATE_SPARSE_H_
#define CERES_INTERNAL_ACCELERATE_SPARSE_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_ACCELERATE_SPARSE
#include <memory>
#include <string>
#include <vector>
#include "ceres/linear_solver.h"
#include "ceres/sparse_cholesky.h"
#include "Accelerate.h"
namespace ceres {
namespace internal {
class CompressedRowSparseMatrix;
class TripletSparseMatrix;
// Maps the scalar type (double or float) to the corresponding Accelerate
// framework sparse/dense types. The unspecialized template is intentionally
// empty so that instantiating it with an unsupported scalar type fails to
// compile.
template<typename Scalar>
struct SparseTypesTrait {
};
// Double-precision Accelerate types.
template<>
struct SparseTypesTrait<double> {
typedef DenseVector_Double DenseVector;
typedef SparseMatrix_Double SparseMatrix;
typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
typedef SparseOpaqueFactorization_Double NumericFactorization;
};
// Single-precision Accelerate types. Note the symbolic factorization type is
// shared with the double specialization: it depends only on the sparsity
// pattern, not on the scalar type.
template<>
struct SparseTypesTrait<float> {
typedef DenseVector_Float DenseVector;
typedef SparseMatrix_Float SparseMatrix;
typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
typedef SparseOpaqueFactorization_Float NumericFactorization;
};
// Thin wrapper around the Accelerate framework's sparse solver entry points,
// templated on the scalar type (double or float) via SparseTypesTrait. Owns
// the scratch workspaces (and, for Scalar != double, a cast copy of the
// values) that are reused across factorize/solve calls.
template<typename Scalar>
class AccelerateSparse {
public:
using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
// Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
// Solves a linear system given its symbolic (reference counted within
// NumericFactorization) and numeric factorization.
// rhs_and_solution holds the rhs on entry and the solution on return.
void Solve(NumericFactorization* numeric_factor,
DenseVector* rhs_and_solution);
// Note: Accelerate's API passes/returns its objects by value, but as the
// objects contain pointers to the underlying data these copies are
// all shallow (in some cases Accelerate also reference counts the
// objects internally).
// The returned view aliases A's arrays and this object's internal buffers;
// it is only valid while both remain alive and unmodified.
ASSparseMatrix CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
// Computes a symbolic factorisation of A that can be used in Solve().
SymbolicFactorization AnalyzeCholesky(ASSparseMatrix* A);
// Compute the numeric Cholesky factorization of A, given its
// symbolic factorization.
NumericFactorization Cholesky(ASSparseMatrix* A,
SymbolicFactorization* symbolic_factor);
// Reuse the NumericFactorization from a previous matrix with the same
// symbolic factorization to represent a new numeric factorization.
void Cholesky(ASSparseMatrix* A, NumericFactorization* numeric_factor);
private:
// Copy of A's row pointers widened to long, as required by Accelerate.
std::vector<long> column_starts_;
// Scratch buffers reused across Solve()/Cholesky() calls; sized and
// 16-byte aligned on demand via ResizeForAccelerateAlignment().
std::vector<uint8_t> solve_workspace_;
std::vector<uint8_t> factorization_workspace_;
// Storage for the values of A if Scalar != double (necessitating a copy).
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> values_;
};
// An implementation of SparseCholesky interface using Apple's Accelerate
// framework. Caches the symbolic factorization on the first Factorize()
// call and recycles the numeric factorization storage on subsequent calls.
// Not thread safe: Factorize()/Solve() mutate shared internal state.
template<typename Scalar>
class AppleAccelerateCholesky : public SparseCholesky {
public:
// Factory
static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
// SparseCholesky interface.
virtual ~AppleAccelerateCholesky();
CompressedRowSparseMatrix::StorageType StorageType() const;
LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
std::string* message) final;
LinearSolverTerminationType Solve(const double* rhs,
double* solution,
std::string* message) final ;
private:
AppleAccelerateCholesky(const OrderingType ordering_type);
void FreeSymbolicFactorization();
void FreeNumericFactorization();
// NOTE(review): ordering_type_ is stored but not referenced by any method
// visible in this translation unit — presumably Accelerate chooses its own
// ordering; confirm before relying on it.
const OrderingType ordering_type_;
AccelerateSparse<Scalar> as_;
// Lazily created in Factorize(); released via the Free*() helpers.
std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
symbolic_factor_;
std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
numeric_factor_;
// Copy of rhs/solution if Scalar != double (necessitating a copy).
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
};
}
}
#endif // CERES_NO_ACCELERATE_SPARSE
#endif // CERES_INTERNAL_ACCELERATE_SPARSE_H_

View File

@@ -35,25 +35,17 @@
#include <cstddef>
#include <string>
#include <vector>
#include "ceres/fpclassify.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
using std::string;
// It is a near impossibility that user code generates this exact
// value in normal operation, thus we will use it to fill arrays
// before passing them to user code. If on return an element of the
// array still contains this value, we will assume that the user code
// did not write to that memory location.
const double kImpossibleValue = 1e302;
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
}
}
@@ -67,7 +59,7 @@ int FindInvalidValue(const int size, const double* x) {
}
for (int i = 0; i < size; ++i) {
if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return i;
}
}

View File

@@ -66,11 +66,9 @@ int FindInvalidValue(const int size, const double* x);
// array pointer is NULL, it is treated as an array of zeros.
void AppendArrayToString(const int size, const double* x, std::string* result);
extern const double kImpossibleValue;
// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
// sorted+uniqued array. By doing this, if there are are k unique
// sorted+uniqued array. By doing this, if there are k unique
// values in the array, each value is replaced by an integer in the
// range [0, k-1], while preserving their relative order.
//

View File

@@ -34,7 +34,6 @@
#include "ceres/block_structure.h"
#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/casts.h"
#include "ceres/integral_types.h"
#include "ceres/internal/eigen.h"
namespace ceres {

View File

@@ -31,9 +31,8 @@
#ifndef CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
#include <vector>
#include <memory>
#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/preconditioner.h"
namespace ceres {
@@ -56,18 +55,21 @@ class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
public:
// A must remain valid while the BlockJacobiPreconditioner is.
explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
BlockJacobiPreconditioner(const BlockJacobiPreconditioner&) = delete;
void operator=(const BlockJacobiPreconditioner&) = delete;
virtual ~BlockJacobiPreconditioner();
// Preconditioner interface
virtual void RightMultiply(const double* x, double* y) const;
virtual int num_rows() const { return m_->num_rows(); }
virtual int num_cols() const { return m_->num_rows(); }
void RightMultiply(const double* x, double* y) const final;
int num_rows() const final { return m_->num_rows(); }
int num_cols() const final { return m_->num_rows(); }
const BlockRandomAccessDiagonalMatrix& matrix() const { return *m_; }
private:
virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
scoped_ptr<BlockRandomAccessDiagonalMatrix> m_;
private:
bool UpdateImpl(const BlockSparseMatrix& A, const double* D) final;
std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
};
} // namespace internal

View File

@@ -37,7 +37,6 @@
#include "ceres/residual_block.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -206,7 +205,7 @@ SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
}
BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
CHECK_NOTNULL(jacobian);
CHECK(jacobian != nullptr);
return jacobian;
}

View File

@@ -49,6 +49,7 @@ class BlockEvaluatePreparer;
class Program;
class SparseMatrix;
// TODO(sameeragarwal): This class needs documentation.
class BlockJacobianWriter {
public:
BlockJacobianWriter(const Evaluator::Options& options,

View File

@@ -32,7 +32,6 @@
#include <vector>
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "glog/logging.h"
namespace ceres {

View File

@@ -33,11 +33,10 @@
#include "ceres/block_random_access_matrix.h"
#include <memory>
#include <vector>
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -57,27 +56,29 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
// blocks is a vector of block sizes. The resulting matrix has
// blocks.size() * blocks.size() cells.
explicit BlockRandomAccessDenseMatrix(const std::vector<int>& blocks);
BlockRandomAccessDenseMatrix(const BlockRandomAccessDenseMatrix&) = delete;
void operator=(const BlockRandomAccessDenseMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDenseMatrix();
// BlockRandomAccessMatrix interface.
virtual CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride);
CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
virtual void SetZero();
void SetZero() final;
// Since the matrix is square with the same row and column block
// structure, num_rows() = num_cols().
virtual int num_rows() const { return num_rows_; }
virtual int num_cols() const { return num_rows_; }
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_rows_; }
// The underlying matrix storing the cells.
const double* values() const { return values_.get(); }
@@ -86,10 +87,8 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
private:
int num_rows_;
std::vector<int> block_layout_;
scoped_array<double> values_;
scoped_array<CellInfo> cell_infos_;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
std::unique_ptr<double[]> values_;
std::unique_ptr<CellInfo[]> cell_infos_;
};
} // namespace internal

View File

@@ -34,9 +34,9 @@
#include <set>
#include <utility>
#include <vector>
#include "Eigen/Dense"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/stl_util.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@@ -137,8 +137,8 @@ void BlockRandomAccessDiagonalMatrix::Invert() {
void BlockRandomAccessDiagonalMatrix::RightMultiply(const double* x,
double* y) const {
CHECK_NOTNULL(x);
CHECK_NOTNULL(y);
CHECK(x != nullptr);
CHECK(y != nullptr);
const double* values = tsm_->values();
for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];

View File

@@ -31,17 +31,14 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
#include <memory>
#include <set>
#include <vector>
#include <utility>
#include "ceres/mutex.h"
#include <vector>
#include "ceres/block_random_access_matrix.h"
#include "ceres/collections_port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {
@@ -53,22 +50,24 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes.
explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDiagonalMatrix();
// BlockRandomAccessMatrix Interface.
virtual CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride);
CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
virtual void SetZero();
void SetZero() final;
// Invert the matrix assuming that each block is positive definite.
void Invert();
@@ -77,8 +76,8 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
void RightMultiply(const double* x, double* y) const;
// Since the matrix is square, num_rows() == num_cols().
virtual int num_rows() const { return tsm_->num_rows(); }
virtual int num_cols() const { return tsm_->num_cols(); }
int num_rows() const final { return tsm_->num_rows(); }
int num_cols() const final { return tsm_->num_cols(); }
const TripletSparseMatrix* matrix() const { return tsm_.get(); }
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
@@ -89,10 +88,9 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
std::vector<CellInfo*> layout_;
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;
std::unique_ptr<TripletSparseMatrix> tsm_;
friend class BlockRandomAccessDiagonalMatrixTest;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDiagonalMatrix);
};
} // namespace internal

View File

@@ -33,7 +33,7 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
#include "ceres/mutex.h"
#include <mutex>
namespace ceres {
namespace internal {
@@ -77,23 +77,18 @@ namespace internal {
//
// if (cell != NULL) {
// MatrixRef m(cell->values, row_stride, col_stride);
// CeresMutexLock l(&cell->m);
// std::lock_guard<std::mutex> l(&cell->m);
// m.block(row, col, row_block_size, col_block_size) = ...
// }
// Structure to carry a pointer to the array containing a cell and the
// Mutex guarding it.
// mutex guarding it.
struct CellInfo {
CellInfo()
: values(NULL) {
}
explicit CellInfo(double* ptr)
: values(ptr) {
}
CellInfo() : values(nullptr) {}
explicit CellInfo(double* values) : values(values) {}
double* values;
Mutex m;
std::mutex m;
};
class BlockRandomAccessMatrix {

View File

@@ -31,12 +31,12 @@
#include "ceres/block_random_access_sparse_matrix.h"
#include <algorithm>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -51,7 +51,7 @@ using std::vector;
BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
const vector<int>& blocks,
const set<pair<int, int> >& block_pairs)
const set<pair<int, int>>& block_pairs)
: kMaxRowBlocks(10 * 1000 * 1000),
blocks_(blocks) {
CHECK_LT(blocks.size(), kMaxRowBlocks);
@@ -69,11 +69,9 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
// object for looking into the values array of the
// TripletSparseMatrix.
int num_nonzeros = 0;
for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
it != block_pairs.end();
++it) {
const int row_block_size = blocks_[it->first];
const int col_block_size = blocks_[it->second];
for (const auto& block_pair : block_pairs) {
const int row_block_size = blocks_[block_pair.first];
const int col_block_size = blocks_[block_pair.second];
num_nonzeros += row_block_size * col_block_size;
}
@@ -88,24 +86,19 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
double* values = tsm_->mutable_values();
int pos = 0;
for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
it != block_pairs.end();
++it) {
const int row_block_size = blocks_[it->first];
const int col_block_size = blocks_[it->second];
cell_values_.push_back(make_pair(make_pair(it->first, it->second),
values + pos));
layout_[IntPairToLong(it->first, it->second)] =
for (const auto& block_pair : block_pairs) {
const int row_block_size = blocks_[block_pair.first];
const int col_block_size = blocks_[block_pair.second];
cell_values_.push_back(make_pair(block_pair, values + pos));
layout_[IntPairToLong(block_pair.first, block_pair.second)] =
new CellInfo(values + pos);
pos += row_block_size * col_block_size;
}
// Fill the sparsity pattern of the underlying matrix.
for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
it != block_pairs.end();
++it) {
const int row_block_id = it->first;
const int col_block_id = it->second;
for (const auto& block_pair : block_pairs) {
const int row_block_id = block_pair.first;
const int col_block_id = block_pair.second;
const int row_block_size = blocks_[row_block_id];
const int col_block_size = blocks_[col_block_id];
int pos =
@@ -125,10 +118,8 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
BlockRandomAccessSparseMatrix::~BlockRandomAccessSparseMatrix() {
for (LayoutType::iterator it = layout_.begin();
it != layout_.end();
++it) {
delete it->second;
for (const auto& entry : layout_) {
delete entry.second;
}
}
@@ -163,19 +154,17 @@ void BlockRandomAccessSparseMatrix::SetZero() {
void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
double* y) const {
vector< pair<pair<int, int>, double*> >::const_iterator it =
cell_values_.begin();
for (; it != cell_values_.end(); ++it) {
const int row = it->first.first;
for (const auto& cell_position_and_data : cell_values_) {
const int row = cell_position_and_data.first.first;
const int row_block_size = blocks_[row];
const int row_block_pos = block_positions_[row];
const int col = it->first.second;
const int col = cell_position_and_data.first.second;
const int col_block_size = blocks_[col];
const int col_block_pos = block_positions_[col];
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
it->second, row_block_size, col_block_size,
cell_position_and_data.second, row_block_size, col_block_size,
x + col_block_pos,
y + row_block_pos);
@@ -185,7 +174,7 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
// triangular multiply also.
if (row != col) {
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
it->second, row_block_size, col_block_size,
cell_position_and_data.second, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos);
}

View File

@@ -31,17 +31,16 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
#include <cstdint>
#include <memory>
#include <set>
#include <vector>
#include <unordered_map>
#include <utility>
#include "ceres/mutex.h"
#include <vector>
#include "ceres/block_random_access_matrix.h"
#include "ceres/collections_port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "ceres/small_blas.h"
@@ -59,23 +58,25 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// of this matrix.
BlockRandomAccessSparseMatrix(
const std::vector<int>& blocks,
const std::set<std::pair<int, int> >& block_pairs);
const std::set<std::pair<int, int>>& block_pairs);
BlockRandomAccessSparseMatrix(const BlockRandomAccessSparseMatrix&) = delete;
void operator=(const BlockRandomAccessSparseMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessSparseMatrix();
// BlockRandomAccessMatrix Interface.
virtual CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride);
CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
virtual void SetZero();
void SetZero() final;
// Assume that the matrix is symmetric and only one half of the
// matrix is stored.
@@ -84,24 +85,24 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
void SymmetricRightMultiply(const double* x, double* y) const;
// Since the matrix is square, num_rows() == num_cols().
virtual int num_rows() const { return tsm_->num_rows(); }
virtual int num_cols() const { return tsm_->num_cols(); }
int num_rows() const final { return tsm_->num_rows(); }
int num_cols() const final { return tsm_->num_cols(); }
// Access to the underlying matrix object.
const TripletSparseMatrix* matrix() const { return tsm_.get(); }
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
private:
int64 IntPairToLong(int row, int col) const {
int64_t IntPairToLong(int row, int col) const {
return row * kMaxRowBlocks + col;
}
void LongToIntPair(int64 index, int* row, int* col) const {
void LongToIntPair(int64_t index, int* row, int* col) const {
*row = index / kMaxRowBlocks;
*col = index % kMaxRowBlocks;
}
const int64 kMaxRowBlocks;
const int64_t kMaxRowBlocks;
// row/column block sizes.
const std::vector<int> blocks_;
@@ -109,18 +110,17 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
typedef HashMap<long int, CellInfo* > LayoutType;
typedef std::unordered_map<long int, CellInfo* > LayoutType;
LayoutType layout_;
// In order traversal of contents of the matrix. This allows us to
// implement a matrix-vector which is 20% faster than using the
// iterator in the Layout object instead.
std::vector<std::pair<std::pair<int, int>, double*> > cell_values_;
std::vector<std::pair<std::pair<int, int>, double*>> cell_values_;
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;
std::unique_ptr<TripletSparseMatrix> tsm_;
friend class BlockRandomAccessSparseMatrixTest;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
};
} // namespace internal

View File

@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/random.h"
#include "ceres/small_blas.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -51,9 +52,8 @@ BlockSparseMatrix::BlockSparseMatrix(
: num_rows_(0),
num_cols_(0),
num_nonzeros_(0),
values_(NULL),
block_structure_(block_structure) {
CHECK_NOTNULL(block_structure_.get());
CHECK(block_structure_ != nullptr);
// Count the number of columns in the matrix.
for (int i = 0; i < block_structure_->cols.size(); ++i) {
@@ -80,7 +80,8 @@ BlockSparseMatrix::BlockSparseMatrix(
VLOG(2) << "Allocating values array with "
<< num_nonzeros_ * sizeof(double) << " bytes."; // NOLINT
values_.reset(new double[num_nonzeros_]);
CHECK_NOTNULL(values_.get());
max_num_nonzeros_ = num_nonzeros_;
CHECK(values_ != nullptr);
}
void BlockSparseMatrix::SetZero() {
@@ -88,8 +89,8 @@ void BlockSparseMatrix::SetZero() {
}
void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
CHECK_NOTNULL(x);
CHECK_NOTNULL(y);
CHECK(x != nullptr);
CHECK(y != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_pos = block_structure_->rows[i].block.position;
@@ -108,8 +109,8 @@ void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
}
void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
CHECK_NOTNULL(x);
CHECK_NOTNULL(y);
CHECK(x != nullptr);
CHECK(y != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_pos = block_structure_->rows[i].block.position;
@@ -128,7 +129,7 @@ void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
}
void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
CHECK_NOTNULL(x);
CHECK(x != nullptr);
VectorRef(x, num_cols_).setZero();
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
@@ -145,7 +146,7 @@ void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
}
void BlockSparseMatrix::ScaleColumns(const double* scale) {
CHECK_NOTNULL(scale);
CHECK(scale != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
@@ -162,7 +163,7 @@ void BlockSparseMatrix::ScaleColumns(const double* scale) {
}
void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
CHECK_NOTNULL(dense_matrix);
CHECK(dense_matrix != nullptr);
dense_matrix->resize(num_rows_, num_cols_);
dense_matrix->setZero();
@@ -185,7 +186,7 @@ void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
void BlockSparseMatrix::ToTripletSparseMatrix(
TripletSparseMatrix* matrix) const {
CHECK_NOTNULL(matrix);
CHECK(matrix != nullptr);
matrix->Reserve(num_nonzeros_);
matrix->Resize(num_rows_, num_cols_);
@@ -220,7 +221,7 @@ const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
}
void BlockSparseMatrix::ToTextFile(FILE* file) const {
CHECK_NOTNULL(file);
CHECK(file != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
const int row_block_pos = block_structure_->rows[i].block.position;
const int row_block_size = block_structure_->rows[i].block.size;
@@ -242,5 +243,162 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
}
}
// Construct a block-diagonal BlockSparseMatrix whose diagonal entries are
// taken from 'diagonal' and whose row/column block structure is given by
// 'column_blocks'. Caller owns the returned matrix.
BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
    const double* diagonal, const std::vector<Block>& column_blocks) {
  // Create the block structure for the diagonal matrix.
  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
  bs->cols = column_blocks;
  int position = 0;
  // One row block per column block, each containing a single (diagonal) cell.
  bs->rows.resize(column_blocks.size(), CompressedRow(1));
  for (int i = 0; i < column_blocks.size(); ++i) {
    CompressedRow& row = bs->rows[i];
    row.block = column_blocks[i];
    Cell& cell = row.cells[0];
    cell.block_id = i;
    cell.position = position;
    position += row.block.size * row.block.size;
  }
  // Create the BlockSparseMatrix with the given block structure.
  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
  matrix->SetZero();
  // Fill the values array of the block sparse matrix.
  double* values = matrix->mutable_values();
  for (int i = 0; i < column_blocks.size(); ++i) {
    const int size = column_blocks[i].size;
    for (int j = 0; j < size; ++j) {
      // j * (size + 1) is a compact way of accessing the (j, j) entry of a
      // row-major size x size block.
      values[j * (size + 1)] = diagonal[j];
    }
    // Advance to the next diagonal block's inputs and storage.
    diagonal += size;
    values += size * size;
  }
  return matrix;
}
// Append the rows of m to the bottom of this matrix. m must have exactly the
// same column block structure as this matrix (checked below). Grows the
// values array, preserving the existing entries, when the combined nonzero
// count exceeds the current capacity.
void BlockSparseMatrix::AppendRows(const BlockSparseMatrix& m) {
  CHECK_EQ(m.num_cols(), num_cols());
  const CompressedRowBlockStructure* m_bs = m.block_structure();
  CHECK_EQ(m_bs->cols.size(), block_structure_->cols.size());
  const int old_num_nonzeros = num_nonzeros_;
  const int old_num_row_blocks = block_structure_->rows.size();
  block_structure_->rows.resize(old_num_row_blocks + m_bs->rows.size());
  // Copy m's row block structure, rebasing row positions onto the bottom of
  // this matrix and cell positions onto the end of this values array.
  for (int i = 0; i < m_bs->rows.size(); ++i) {
    const CompressedRow& m_row = m_bs->rows[i];
    CompressedRow& row = block_structure_->rows[old_num_row_blocks + i];
    row.block.size = m_row.block.size;
    row.block.position = num_rows_;
    num_rows_ += m_row.block.size;
    row.cells.resize(m_row.cells.size());
    for (int c = 0; c < m_row.cells.size(); ++c) {
      const int block_id = m_row.cells[c].block_id;
      row.cells[c].block_id = block_id;
      row.cells[c].position = num_nonzeros_;
      num_nonzeros_ += m_row.block.size * m_bs->cols[block_id].size;
    }
  }
  // Grow the values array if needed, keeping the existing entries intact.
  if (num_nonzeros_ > max_num_nonzeros_) {
    double* new_values = new double[num_nonzeros_];
    std::copy(values_.get(), values_.get() + old_num_nonzeros, new_values);
    values_.reset(new_values);
    max_num_nonzeros_ = num_nonzeros_;
  }
  // Copy m's values into the newly appended region.
  std::copy(m.values(),
            m.values() + m.num_nonzeros(),
            values_.get() + old_num_nonzeros);
}
void BlockSparseMatrix::DeleteRowBlocks(const int delta_row_blocks) {
const int num_row_blocks = block_structure_->rows.size();
int delta_num_nonzeros = 0;
int delta_num_rows = 0;
const std::vector<Block>& column_blocks = block_structure_->cols;
for (int i = 0; i < delta_row_blocks; ++i) {
const CompressedRow& row = block_structure_->rows[num_row_blocks - i - 1];
delta_num_rows += row.block.size;
for (int c = 0; c < row.cells.size(); ++c) {
const Cell& cell = row.cells[c];
delta_num_nonzeros += row.block.size * column_blocks[cell.block_id].size;
}
}
num_nonzeros_ -= delta_num_nonzeros;
num_rows_ -= delta_num_rows;
block_structure_->rows.resize(num_row_blocks - delta_row_blocks);
}
// Create a random BlockSparseMatrix as described by options: random block
// sizes in the given ranges, cells present with probability block_density,
// values drawn from a standard normal. Caller owns the result.
BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
    const BlockSparseMatrix::RandomMatrixOptions& options) {
  CHECK_GT(options.num_row_blocks, 0);
  CHECK_GT(options.min_row_block_size, 0);
  CHECK_GT(options.max_row_block_size, 0);
  CHECK_LE(options.min_row_block_size, options.max_row_block_size);
  CHECK_GT(options.block_density, 0.0);
  CHECK_LE(options.block_density, 1.0);
  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
  if (options.col_blocks.empty()) {
    CHECK_GT(options.num_col_blocks, 0);
    CHECK_GT(options.min_col_block_size, 0);
    CHECK_GT(options.max_col_block_size, 0);
    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
    // Generate the col block structure.
    int col_block_position = 0;
    for (int i = 0; i < options.num_col_blocks; ++i) {
      // Generate a random integer in [min_col_block_size, max_col_block_size]
      const int delta_block_size =
          Uniform(options.max_col_block_size - options.min_col_block_size);
      const int col_block_size = options.min_col_block_size + delta_block_size;
      bs->cols.push_back(Block(col_block_size, col_block_position));
      col_block_position += col_block_size;
    }
  } else {
    // Caller supplied an explicit column block structure; use it verbatim.
    bs->cols = options.col_blocks;
  }
  // Retry the whole row/cell generation until at least one cell exists, so
  // the returned matrix is never completely empty (possible when
  // block_density is small).
  bool matrix_has_blocks = false;
  while (!matrix_has_blocks) {
    VLOG(1) << "Clearing";
    bs->rows.clear();
    int row_block_position = 0;
    int value_position = 0;
    for (int r = 0; r < options.num_row_blocks; ++r) {
      // Random row block size in [min_row_block_size, max_row_block_size].
      const int delta_block_size =
          Uniform(options.max_row_block_size - options.min_row_block_size);
      const int row_block_size = options.min_row_block_size + delta_block_size;
      bs->rows.push_back(CompressedRow());
      CompressedRow& row = bs->rows.back();
      row.block.size = row_block_size;
      row.block.position = row_block_position;
      row_block_position += row_block_size;
      // Each (row block, col block) pair becomes a cell with probability
      // block_density.
      for (int c = 0; c < bs->cols.size(); ++c) {
        if (RandDouble() > options.block_density) continue;
        row.cells.push_back(Cell());
        Cell& cell = row.cells.back();
        cell.block_id = c;
        cell.position = value_position;
        value_position += row_block_size * bs->cols[c].size;
        matrix_has_blocks = true;
      }
    }
  }
  // Fill the values with standard normal samples.
  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
  double* values = matrix->mutable_values();
  for (int i = 0; i < matrix->num_nonzeros(); ++i) {
    values[i] = RandNormal();
  }
  return matrix;
}
} // namespace internal
} // namespace ceres

View File

@@ -34,11 +34,10 @@
#ifndef CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
#include <memory>
#include "ceres/block_structure.h"
#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -64,34 +63,99 @@ class BlockSparseMatrix : public SparseMatrix {
explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
BlockSparseMatrix();
BlockSparseMatrix(const BlockSparseMatrix&) = delete;
void operator=(const BlockSparseMatrix&) = delete;
virtual ~BlockSparseMatrix();
// Implementation of SparseMatrix interface.
virtual void SetZero();
virtual void RightMultiply(const double* x, double* y) const;
virtual void LeftMultiply(const double* x, double* y) const;
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
virtual void ToTextFile(FILE* file) const;
void SetZero() final;
void RightMultiply(const double* x, double* y) const final;
void LeftMultiply(const double* x, double* y) const final;
void SquaredColumnNorm(double* x) const final;
void ScaleColumns(const double* scale) final;
void ToDenseMatrix(Matrix* dense_matrix) const final;
void ToTextFile(FILE* file) const final;
virtual int num_rows() const { return num_rows_; }
virtual int num_cols() const { return num_cols_; }
virtual int num_nonzeros() const { return num_nonzeros_; }
virtual const double* values() const { return values_.get(); }
virtual double* mutable_values() { return values_.get(); }
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return num_nonzeros_; }
const double* values() const final { return values_.get(); }
double* mutable_values() final { return values_.get(); }
void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
const CompressedRowBlockStructure* block_structure() const;
// Append the contents of m to the bottom of this matrix. m must
// have the same column blocks structure as this matrix.
void AppendRows(const BlockSparseMatrix& m);
// Delete the bottom delta_rows_blocks.
void DeleteRowBlocks(int delta_row_blocks);
static BlockSparseMatrix* CreateDiagonalMatrix(
const double* diagonal,
const std::vector<Block>& column_blocks);
struct RandomMatrixOptions {
int num_row_blocks = 0;
int min_row_block_size = 0;
int max_row_block_size = 0;
int num_col_blocks = 0;
int min_col_block_size = 0;
int max_col_block_size = 0;
// 0 < block_density <= 1 is the probability of a block being
// present in the matrix. A given random matrix will not have
// precisely this density.
double block_density = 0.0;
// If col_blocks is non-empty, then the generated random matrix
// has this block structure and the column related options in this
// struct are ignored.
std::vector<Block> col_blocks;
};
// Create a random BlockSparseMatrix whose entries are normally
// distributed and whose structure is determined by
// RandomMatrixOptions.
//
// Caller owns the result.
static BlockSparseMatrix* CreateRandomMatrix(
const RandomMatrixOptions& options);
private:
int num_rows_;
int num_cols_;
int max_num_nonzeros_;
int num_nonzeros_;
scoped_array<double> values_;
scoped_ptr<CompressedRowBlockStructure> block_structure_;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
int max_num_nonzeros_;
std::unique_ptr<double[]> values_;
std::unique_ptr<CompressedRowBlockStructure> block_structure_;
};
// A number of algorithms like the SchurEliminator do not need access to the
// full BlockSparseMatrix interface. They only need read only access to the
// values array and the block structure.
//
// BlockSparseMatrixData is a class that carries these two bits of
// information.
//
// NOTE: Instances do not own the block structure or the values array; the
// backing matrix (or arrays) must outlive this object.
class BlockSparseMatrixData {
 public:
  // Implicit conversion from BlockSparseMatrix is intentional here, so a
  // matrix can be passed wherever read-only data access suffices.
  BlockSparseMatrixData(const BlockSparseMatrix& m)
      : block_structure_(m.block_structure()), values_(m.values()) {}

  BlockSparseMatrixData(const CompressedRowBlockStructure* block_structure,
                        const double* values)
      : block_structure_(block_structure), values_(values) {}

  const CompressedRowBlockStructure* block_structure() const {
    return block_structure_;
  }
  const double* values() const { return values_; }

 private:
  const CompressedRowBlockStructure* block_structure_;
  const double* values_;
};
} // namespace internal

View File

@@ -38,14 +38,14 @@
#ifndef CERES_INTERNAL_BLOCK_STRUCTURE_H_
#define CERES_INTERNAL_BLOCK_STRUCTURE_H_
#include <cstdint>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
typedef int32 BlockSize;
typedef int32_t BlockSize;
struct Block {
Block() : size(-1), position(-1) {}
@@ -70,6 +70,11 @@ struct Cell {
bool CellLessThan(const Cell& lhs, const Cell& rhs);
// Pairs a Block (size and position) with the list of Cells that occupy it.
struct CompressedList {
  CompressedList() {}

  // Construct a CompressedList with the cells containing num_cells
  // entries.
  // NOTE(review): single-argument constructor is implicit — presumably
  // intentional for container resize/fill calls; confirm before marking it
  // explicit.
  CompressedList(int num_cells) : cells(num_cells) {}

  // Size and position of this row/column block.
  Block block;
  // The cells occupying this block.
  std::vector<Cell> cells;
};

View File

@@ -80,9 +80,9 @@ class CallbackCostFunction : public ceres::CostFunction {
virtual ~CallbackCostFunction() {}
virtual bool Evaluate(double const* const* parameters,
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
double** jacobians) const final {
return (*cost_function_)(user_data_,
const_cast<double**>(parameters),
residuals,
@@ -101,7 +101,7 @@ class CallbackLossFunction : public ceres::LossFunction {
explicit CallbackLossFunction(ceres_loss_function_t loss_function,
void* user_data)
: loss_function_(loss_function), user_data_(user_data) {}
virtual void Evaluate(double sq_norm, double* rho) const {
void Evaluate(double sq_norm, double* rho) const final {
(*loss_function_)(user_data_, sq_norm, rho);
}

View File

@@ -47,9 +47,29 @@ StateUpdatingCallback::~StateUpdatingCallback() {}
// End-of-iteration hook: push the optimizer's internal state vector back
// into the parameter blocks and from there into the user-visible parameter
// storage, so callers observe up-to-date values after each iteration.
CallbackReturnType StateUpdatingCallback::operator()(
    const IterationSummary& summary) {
  program_->StateVectorToParameterBlocks(parameters_);
  program_->CopyParameterBlockStateToUserState();
  // This callback only mirrors state; it never terminates the solve.
  return SOLVER_CONTINUE;
}
// Stores the array size and the (non-owning) pointers to the
// solver-internal and user-visible parameter arrays; both arrays must
// outlive this callback.
GradientProblemSolverStateUpdatingCallback::
    GradientProblemSolverStateUpdatingCallback(
        int num_parameters,
        const double* internal_parameters,
        double* user_parameters)
    : num_parameters_(num_parameters),
      internal_parameters_(internal_parameters),
      user_parameters_(user_parameters) {}

GradientProblemSolverStateUpdatingCallback::
    ~GradientProblemSolverStateUpdatingCallback() {}
// End-of-iteration hook: if the last step was accepted, copy the
// solver-internal parameter values into the user-visible parameter array so
// the caller observes the current iterate.
//
// Fix: dropped two stray `program_->...` statements that referenced members
// (`program_`, `parameters_`) this class does not declare — they belong to
// StateUpdatingCallback and could not compile here.
CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
    const IterationSummary& summary) {
  if (summary.step_is_successful) {
    std::copy(internal_parameters_,
              internal_parameters_ + num_parameters_,
              user_parameters_);
  }
  // This callback only mirrors state; it never terminates the solve.
  return SOLVER_CONTINUE;
}

View File

@@ -46,19 +46,34 @@ class StateUpdatingCallback : public IterationCallback {
public:
StateUpdatingCallback(Program* program, double* parameters);
virtual ~StateUpdatingCallback();
virtual CallbackReturnType operator()(const IterationSummary& summary);
CallbackReturnType operator()(const IterationSummary& summary) final;
private:
Program* program_;
double* parameters_;
};
// Callback for updating the externally visible state of the
// parameters vector for GradientProblemSolver.
//
// After each successful step, num_parameters_ doubles are copied from the
// solver-internal array into the user array. Neither array is owned.
class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
 public:
  // internal_parameters and user_parameters must outlive this callback.
  GradientProblemSolverStateUpdatingCallback(int num_parameters,
                                             const double* internal_parameters,
                                             double* user_parameters);
  virtual ~GradientProblemSolverStateUpdatingCallback();
  CallbackReturnType operator()(const IterationSummary& summary) final;
 private:
  // Number of entries to copy on each successful iteration.
  int num_parameters_;
  // Source: solver-internal parameter values (not owned).
  const double* internal_parameters_;
  // Destination: user-visible parameter values (not owned).
  double* user_parameters_;
};
// Callback for logging the state of the minimizer to STDERR or
// STDOUT depending on the user's preferences and logging level.
class LoggingCallback : public IterationCallback {
public:
LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
virtual ~LoggingCallback();
virtual CallbackReturnType operator()(const IterationSummary& summary);
CallbackReturnType operator()(const IterationSummary& summary) final;
private:
const MinimizerType minimizer_type;

View File

@@ -0,0 +1,232 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: David Gallup (dgallup@google.com)
// Sameer Agarwal (sameeragarwal@google.com)
#include "ceres/canonical_views_clustering.h"
#include <unordered_set>
#include <unordered_map>
#include "ceres/graph.h"
#include "ceres/map_util.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
using std::vector;
// Aliases for the vertex -> cluster-id maps and vertex sets used throughout
// the clustering implementation. `using` is the modern spelling of typedef
// (the file already relies on C++11 features such as nullptr and
// unordered_map).
using IntMap = std::unordered_map<int, int>;
using IntSet = std::unordered_set<int>;
// Greedy implementation of the canonical views clustering algorithm.
//
// An instance is a single-use state holder for one clustering run: options_
// and graph_ are set by ComputeClustering, which then drives the greedy
// selection via the private helpers below.
class CanonicalViewsClustering {
 public:
  CanonicalViewsClustering() {}

  // Compute the canonical views clustering of the vertices of the
  // graph. centers will contain the vertices that are the identified
  // as the canonical views/cluster centers, and membership is a map
  // from vertices to cluster_ids. The i^th cluster center corresponds
  // to the i^th cluster. It is possible depending on the
  // configuration of the clustering algorithm that some of the
  // vertices may not be assigned to any cluster. In this case they
  // are assigned to a cluster with id = kInvalidClusterId.
  void ComputeClustering(const CanonicalViewsClusteringOptions& options,
                         const WeightedGraph<int>& graph,
                         vector<int>* centers,
                         IntMap* membership);

 private:
  // Collect the vertices whose vertex weight is not
  // WeightedGraph::InvalidWeight().
  void FindValidViews(IntSet* valid_views) const;
  // Change in the clustering objective if 'candidate' were added to the
  // current set of canonical views.
  double ComputeClusteringQualityDifference(const int candidate,
                                            const vector<int>& centers) const;
  // Reassign views that are better explained by the new canonical view.
  void UpdateCanonicalViewAssignments(const int canonical_view);
  // Map every vertex to the cluster id of its canonical view (or the
  // invalid cluster id when unassigned).
  void ComputeClusterMembership(const vector<int>& centers,
                                IntMap* membership) const;

  CanonicalViewsClusteringOptions options_;
  // Not owned; set by ComputeClustering. Initialized so a default
  // constructed object does not hold an indeterminate pointer.
  const WeightedGraph<int>* graph_ = nullptr;
  // Maps a view to its representative canonical view (its cluster
  // center).
  IntMap view_to_canonical_view_;
  // Maps a view to its similarity to its current cluster center.
  std::unordered_map<int, double> view_to_canonical_view_similarity_;
};
// Entry point: run the canonical views clustering of 'graph' and report the
// wall-clock time taken at VLOG(2).
//
// centers receives the chosen canonical views (cluster centers); membership
// maps every vertex to its cluster id (or an invalid id if unassigned).
//
// Fix: use nullptr instead of NULL for consistency with the rest of this
// file (e.g. the CHECK(x != nullptr) calls below).
void ComputeCanonicalViewsClustering(
    const CanonicalViewsClusteringOptions& options,
    const WeightedGraph<int>& graph,
    vector<int>* centers,
    IntMap* membership) {
  const time_t start_time = time(nullptr);
  CanonicalViewsClustering cv;
  cv.ComputeClustering(options, graph, centers, membership);
  VLOG(2) << "Canonical views clustering time (secs): "
          << time(nullptr) - start_time;
}
// Implementation of CanonicalViewsClustering
// Greedy clustering: repeatedly pick the view whose addition to the set of
// canonical views yields the largest change in the objective, stopping once
// no improvement is possible and at least options.min_views centers have
// been chosen. Finally assigns every vertex a cluster id via
// ComputeClusterMembership.
void CanonicalViewsClustering::ComputeClustering(
    const CanonicalViewsClusteringOptions& options,
    const WeightedGraph<int>& graph,
    vector<int>* centers,
    IntMap* membership) {
  options_ = options;
  CHECK(centers != nullptr);
  CHECK(membership != nullptr);
  centers->clear();
  membership->clear();
  graph_ = &graph;

  // Only vertices with valid weights are candidates for canonical views.
  IntSet valid_views;
  FindValidViews(&valid_views);
  while (valid_views.size() > 0) {
    // Find the next best canonical view.
    double best_difference = -std::numeric_limits<double>::max();
    int best_view = 0;

    // TODO(sameeragarwal): Make this loop multi-threaded.
    for (const auto& view : valid_views) {
      const double difference =
          ComputeClusteringQualityDifference(view, *centers);
      if (difference > best_difference) {
        best_difference = difference;
        best_view = view;
      }
    }
    CHECK_GT(best_difference, -std::numeric_limits<double>::max());

    // Add canonical view if quality improves, or if minimum is not
    // yet met, otherwise break.
    if ((best_difference <= 0) &&
        (centers->size() >= options_.min_views)) {
      break;
    }

    centers->push_back(best_view);
    valid_views.erase(best_view);
    // Neighbors that are better explained by the new center switch to it.
    UpdateCanonicalViewAssignments(best_view);
  }
  ComputeClusterMembership(*centers, membership);
}
// Collect the vertices of the graph that carry a valid vertex weight into
// valid_views; vertices with InvalidWeight() are skipped.
void CanonicalViewsClustering::FindValidViews(
    IntSet* valid_views) const {
  const auto invalid_weight = WeightedGraph<int>::InvalidWeight();
  for (const int vertex : graph_->vertices()) {
    if (graph_->VertexWeight(vertex) == invalid_weight) {
      continue;
    }
    valid_views->insert(vertex);
  }
}
// Computes the difference in the quality score if 'candidate' were
// added to the set of canonical views.
double CanonicalViewsClustering::ComputeClusteringQualityDifference(
const int candidate,
const vector<int>& centers) const {
// View score.
double difference =
options_.view_score_weight * graph_->VertexWeight(candidate);
// Compute how much the quality score changes if the candidate view
// was added to the list of canonical views and its nearest
// neighbors became members of its cluster.
const IntSet& neighbors = graph_->Neighbors(candidate);
for (const auto& neighbor : neighbors) {
const double old_similarity =
FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
const double new_similarity = graph_->EdgeWeight(neighbor, candidate);
if (new_similarity > old_similarity) {
difference += new_similarity - old_similarity;
}
}
// Number of views penalty.
difference -= options_.size_penalty_weight;
// Orthogonality.
for (int i = 0; i < centers.size(); ++i) {
difference -= options_.similarity_penalty_weight *
graph_->EdgeWeight(centers[i], candidate);
}
return difference;
}
// After 'canonical_view' has been added to the canonical set, reassign each
// of its neighbors to it when it explains them better than their current
// cluster center.
void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
    const int canonical_view) {
  for (const int neighbor : graph_->Neighbors(canonical_view)) {
    const double current_similarity =
        FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
    const double new_similarity =
        graph_->EdgeWeight(neighbor, canonical_view);
    if (new_similarity <= current_similarity) {
      continue;
    }
    // The new canonical view explains this neighbor better: adopt it.
    view_to_canonical_view_[neighbor] = canonical_view;
    view_to_canonical_view_similarity_[neighbor] = new_similarity;
  }
}
// Assign a cluster id to each view.
void CanonicalViewsClustering::ComputeClusterMembership(
const vector<int>& centers,
IntMap* membership) const {
CHECK(membership != nullptr);
membership->clear();
// The i^th cluster has cluster id i.
IntMap center_to_cluster_id;
for (int i = 0; i < centers.size(); ++i) {
center_to_cluster_id[centers[i]] = i;
}
static constexpr int kInvalidClusterId = -1;
const IntSet& views = graph_->vertices();
for (const auto& view : views) {
auto it = view_to_canonical_view_.find(view);
int cluster_id = kInvalidClusterId;
if (it != view_to_canonical_view_.end()) {
cluster_id = FindOrDie(center_to_cluster_id, it->second);
}
InsertOrDie(membership, view, cluster_id);
}
}
} // namespace internal
} // namespace ceres

View File

@@ -0,0 +1,124 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// An implementation of the Canonical Views clustering algorithm from
// "Scene Summarization for Online Image Collections", Ian Simon, Noah
// Snavely, Steven M. Seitz, ICCV 2007.
//
// More details can be found at
// http://grail.cs.washington.edu/projects/canonview/
//
// Ceres uses this algorithm to perform view clustering for
// constructing visibility based preconditioners.
#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
#include <unordered_map>
#include <vector>
#include "ceres/graph.h"
namespace ceres {
namespace internal {
struct CanonicalViewsClusteringOptions;
// Compute a partitioning of the vertices of the graph using the
// canonical views clustering algorithm.
//
// In the following we will use the terms vertices and views
// interchangeably. Given a weighted Graph G(V,E), the canonical views
// of G are the set of vertices that best "summarize" the content
// of the graph. If w_ij is the weight connecting the vertex i to
// vertex j, and C is the set of canonical views. Then the objective
// of the canonical views algorithm is
//
// E[C] = sum_[i in V] max_[j in C] w_ij
// - size_penalty_weight * |C|
// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
//
// size_penalty_weight (alpha) is the size penalty that penalizes a large
// number of canonical views.
//
// similarity_penalty_weight (beta) is the similarity penalty that penalizes
// canonical views that are too similar to other canonical views.
//
// Thus the canonical views algorithm tries to find a canonical view
// for each vertex in the graph which best explains it, while trying
// to minimize the number of canonical views and the overlap between
// them.
//
// We further augment the above objective function by allowing for per
// vertex weights, higher weights indicating a higher preference for
// being chosen as a canonical view. Thus if w_i is the vertex weight
// for vertex i, the objective function is then
//
// E[C] = sum_[i in V] max_[j in C] w_ij
// - size_penalty_weight * |C|
// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
// + view_score_weight * sum_[i in C] w_i
//
// centers will contain the vertices that are identified
// as the canonical views/cluster centers, and membership is a map
// from vertices to cluster_ids. The i^th cluster center corresponds
// to the i^th cluster.
//
// It is possible depending on the configuration of the clustering
// algorithm that some of the vertices may not be assigned to any
// cluster. In this case they are assigned to a cluster with id = -1.
// Runs canonical views clustering on `graph` (see the discussion
// above for the objective being optimized). On return, `centers`
// holds the chosen cluster centers and `membership` maps each vertex
// to the id of its cluster; see CanonicalViewsClusteringOptions below
// for the tuning parameters.
void ComputeCanonicalViewsClustering(
const CanonicalViewsClusteringOptions& options,
const WeightedGraph<int>& graph,
std::vector<int>* centers,
std::unordered_map<int, int>* membership);
// Options controlling the canonical views clustering algorithm. The
// penalty weights correspond to the terms of the objective function
// documented above ComputeCanonicalViewsClustering.
struct CanonicalViewsClusteringOptions {
// The minimum number of canonical views to compute.
int min_views = 3;
// Penalty weight for the number of canonical views. A higher
// number will result in fewer canonical views.
double size_penalty_weight = 5.75;
// Penalty weight for the diversity (orthogonality) of the
// canonical views. A higher number will encourage less similar
// canonical views.
double similarity_penalty_weight = 100;
// Weight for per-view scores. Lower weight places less
// confidence in the view scores.
double view_score_weight = 0.0;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_

View File

@@ -32,8 +32,8 @@
#define CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
#include <algorithm>
#include <memory>
#include "ceres/linear_operator.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/eigen.h"
namespace ceres {
@@ -84,7 +84,7 @@ class CgnrLinearOperator : public LinearOperator {
}
virtual ~CgnrLinearOperator() {}
virtual void RightMultiply(const double* x, double* y) const {
void RightMultiply(const double* x, double* y) const final {
std::fill(z_.get(), z_.get() + A_.num_rows(), 0.0);
// z = Ax
@@ -101,17 +101,17 @@ class CgnrLinearOperator : public LinearOperator {
}
}
virtual void LeftMultiply(const double* x, double* y) const {
void LeftMultiply(const double* x, double* y) const final {
RightMultiply(x, y);
}
virtual int num_rows() const { return A_.num_cols(); }
virtual int num_cols() const { return A_.num_cols(); }
int num_rows() const final { return A_.num_cols(); }
int num_cols() const final { return A_.num_cols(); }
private:
const LinearOperator& A_;
const double* D_;
scoped_array<double> z_;
std::unique_ptr<double[]> z_;
};
} // namespace internal

View File

@@ -35,6 +35,7 @@
#include "ceres/conjugate_gradients_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/subset_preconditioner.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
@@ -42,14 +43,19 @@ namespace ceres {
namespace internal {
CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
: options_(options),
preconditioner_(NULL) {
: options_(options) {
if (options_.preconditioner_type != JACOBI &&
options_.preconditioner_type != IDENTITY) {
LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
options_.preconditioner_type != IDENTITY &&
options_.preconditioner_type != SUBSET) {
LOG(FATAL)
<< "Preconditioner = "
<< PreconditionerTypeToString(options_.preconditioner_type) << ". "
<< "Congratulations, you found a bug in Ceres. Please report it.";
}
}
CgnrSolver::~CgnrSolver() {}
LinearSolver::Summary CgnrSolver::SolveImpl(
BlockSparseMatrix* A,
const double* b,
@@ -62,16 +68,31 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
z.setZero();
A->LeftMultiply(b, z.data());
// Precondition if necessary.
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
if (options_.preconditioner_type == JACOBI) {
if (preconditioner_.get() == NULL) {
if (!preconditioner_) {
if (options_.preconditioner_type == JACOBI) {
preconditioner_.reset(new BlockJacobiPreconditioner(*A));
} else if (options_.preconditioner_type == SUBSET) {
Preconditioner::Options preconditioner_options;
preconditioner_options.type = SUBSET;
preconditioner_options.subset_preconditioner_start_row_block =
options_.subset_preconditioner_start_row_block;
preconditioner_options.sparse_linear_algebra_library_type =
options_.sparse_linear_algebra_library_type;
preconditioner_options.use_postordering = options_.use_postordering;
preconditioner_options.num_threads = options_.num_threads;
preconditioner_options.context = options_.context;
preconditioner_.reset(
new SubsetPreconditioner(preconditioner_options, *A));
}
preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner = preconditioner_.get();
}
if (preconditioner_) {
preconditioner_->Update(*A, per_solve_options.D);
}
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
cg_per_solve_options.preconditioner = preconditioner_.get();
// Solve (AtA + DtD)x = z (= Atb).
VectorRef(x, A->num_cols()).setZero();
CgnrLinearOperator lhs(*A, per_solve_options.D);

View File

@@ -31,7 +31,7 @@
#ifndef CERES_INTERNAL_CGNR_SOLVER_H_
#define CERES_INTERNAL_CGNR_SOLVER_H_
#include "ceres/internal/scoped_ptr.h"
#include <memory>
#include "ceres/linear_solver.h"
namespace ceres {
@@ -51,16 +51,19 @@ class BlockJacobiPreconditioner;
class CgnrSolver : public BlockSparseMatrixSolver {
public:
explicit CgnrSolver(const LinearSolver::Options& options);
virtual Summary SolveImpl(
CgnrSolver(const CgnrSolver&) = delete;
void operator=(const CgnrSolver&) = delete;
virtual ~CgnrSolver();
Summary SolveImpl(
BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x);
double* x) final;
private:
const LinearSolver::Options options_;
scoped_ptr<Preconditioner> preconditioner_;
CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
std::unique_ptr<Preconditioner> preconditioner_;
};
} // namespace internal

View File

@@ -1,196 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// Portable HashMap and HashSet, and a specialized overload for hashing pairs.
#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
#define CERES_INTERNAL_COLLECTIONS_PORT_H_
#include "ceres/internal/port.h"
#if defined(CERES_NO_UNORDERED_MAP)
# include <map>
# include <set>
#endif
#if defined(CERES_TR1_UNORDERED_MAP)
# include <tr1/unordered_map>
# include <tr1/unordered_set>
# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
# define CERES_HASH_NAMESPACE_END } }
#endif
#if defined(CERES_STD_UNORDERED_MAP)
# include <unordered_map>
# include <unordered_set>
# define CERES_HASH_NAMESPACE_START namespace std {
# define CERES_HASH_NAMESPACE_END }
#endif
#if defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
# include <unordered_map>
# include <unordered_set>
# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
# define CERES_HASH_NAMESPACE_END } }
#endif
#if !defined(CERES_NO_UNORDERED_MAP) && !defined(CERES_TR1_UNORDERED_MAP) && \
!defined(CERES_STD_UNORDERED_MAP) && !defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE) // NOLINT
# error One of: CERES_NO_UNORDERED_MAP, CERES_TR1_UNORDERED_MAP,\
CERES_STD_UNORDERED_MAP, CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE must be defined! // NOLINT
#endif
#include <utility>
#include "ceres/integral_types.h"
// Some systems don't have access to unordered_map/unordered_set. In
// that case, substitute the hash map/set with normal map/set. The
// price to pay is slower speed for some operations.
#if defined(CERES_NO_UNORDERED_MAP)
namespace ceres {
namespace internal {
// Ordered-container fallback: when no unordered_map/set is available,
// expose the same HashMap/HashSet names backed by map/set. Lookups
// become O(log n) instead of amortized O(1), but the interface used by
// the rest of Ceres is unchanged.
template<typename K, typename V>
struct HashMap : map<K, V> {};
template<typename K>
struct HashSet : set<K> {};
} // namespace internal
} // namespace ceres
#else
namespace ceres {
namespace internal {
#if defined(CERES_TR1_UNORDERED_MAP) || \
defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
template<typename K, typename V>
struct HashMap : std::tr1::unordered_map<K, V> {};
template<typename K>
struct HashSet : std::tr1::unordered_set<K> {};
#endif
#if defined(CERES_STD_UNORDERED_MAP)
template<typename K, typename V>
struct HashMap : std::unordered_map<K, V> {};
template<typename K>
struct HashSet : std::unordered_set<K> {};
#endif
#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
#define GG_LONGLONG(x) x##I64
#define GG_ULONGLONG(x) x##UI64
#else
#define GG_LONGLONG(x) x##LL
#define GG_ULONGLONG(x) x##ULL
#endif
// The hash function is due to Bob Jenkins (see
// http://burtleburtle.net/bob/hash/index.html). Each mix takes 36 instructions,
// in 18 cycles if you're lucky. On x86 architectures, this requires 45
// instructions in 27 cycles, if you're lucky.
//
// 32bit version. Reversibly mixes the three 32-bit words a, b and c
// in place; every input bit influences all three outputs. Used as the
// combining step of the Hash*NumWithSeed functions below.
inline void hash_mix(uint32& a, uint32& b, uint32& c) {
a -= b; a -= c; a ^= (c>>13);
b -= c; b -= a; b ^= (a<<8);
c -= a; c -= b; c ^= (b>>13);
a -= b; a -= c; a ^= (c>>12);
b -= c; b -= a; b ^= (a<<16);
c -= a; c -= b; c ^= (b>>5);
a -= b; a -= c; a ^= (c>>3);
b -= c; b -= a; b ^= (a<<10);
c -= a; c -= b; c ^= (b>>15);
}
// 64bit version. Same structure as the 32-bit hash_mix above, with
// shift amounts adjusted for 64-bit words.
inline void hash_mix(uint64& a, uint64& b, uint64& c) {
a -= b; a -= c; a ^= (c>>43);
b -= c; b -= a; b ^= (a<<9);
c -= a; c -= b; c ^= (b>>8);
a -= b; a -= c; a ^= (c>>38);
b -= c; b -= a; b ^= (a<<23);
c -= a; c -= b; c ^= (b>>5);
a -= b; a -= c; a ^= (c>>35);
b -= c; b -= a; b ^= (a<<49);
c -= a; c -= b; c ^= (b>>11);
}
// Hashes the 32-bit value num together with the seed c by running one
// round of hash_mix; the mixed seed word is the result.
inline uint32 Hash32NumWithSeed(uint32 num, uint32 c) {
// The golden ratio; an arbitrary value.
uint32 b = 0x9e3779b9UL;
hash_mix(num, b, c);
return c;
}
// 64-bit counterpart of Hash32NumWithSeed: mixes num with seed c via
// the 64-bit hash_mix and returns the mixed seed word.
inline uint64 Hash64NumWithSeed(uint64 num, uint64 c) {
// More of the golden ratio.
uint64 b = GG_ULONGLONG(0xe08c1d668b756f82);
hash_mix(num, b, c);
return c;
}
} // namespace internal
} // namespace ceres
// Since on some platforms this is a doubly-nested namespace (std::tr1) and
// others it is not, the entire namespace line must be in a macro.
CERES_HASH_NAMESPACE_START
// The outrageously annoying specializations below are for portability reasons.
// In short, it's not possible to have two overloads of hash<pair<T1, T2>
// Hasher for STL pairs. Requires hashers for both members to be defined.
// Specialization of hash for pair<T, T>, injected into the namespace
// selected by CERES_HASH_NAMESPACE_START above. Requires hash<T> to
// be defined for the member type.
template<typename T>
struct hash<pair<T, T> > {
// Combines the two member hashes with the 32- or 64-bit mixer
// depending on the platform's size_t width.
size_t operator()(const pair<T, T>& p) const {
size_t h1 = hash<T>()(p.first);
size_t h2 = hash<T>()(p.second);
// The decision below is at compile time
return (sizeof(h1) <= sizeof(ceres::internal::uint32)) ?
ceres::internal::Hash32NumWithSeed(h1, h2) :
ceres::internal::Hash64NumWithSeed(h1, h2);
}
// Less than operator for MSVC.
bool operator()(const pair<T, T>& a,
const pair<T, T>& b) const {
return a < b;
}
static const size_t bucket_size = 4; // These are required by MSVC
static const size_t min_buckets = 8; // 4 and 8 are defaults.
};
CERES_HASH_NAMESPACE_END
#endif // CERES_NO_UNORDERED_MAP
#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_

View File

@@ -47,8 +47,10 @@ void CompressedColumnScalarMatrixToBlockMatrix(
const vector<int>& col_blocks,
vector<int>* block_rows,
vector<int>* block_cols) {
CHECK_NOTNULL(block_rows)->clear();
CHECK_NOTNULL(block_cols)->clear();
CHECK(block_rows != nullptr);
CHECK(block_cols != nullptr);
block_rows->clear();
block_cols->clear();
const int num_row_blocks = row_blocks.size();
const int num_col_blocks = col_blocks.size();

View File

@@ -30,6 +30,7 @@
#include "ceres/compressed_row_jacobian_writer.h"
#include <iterator>
#include <utility>
#include <vector>
@@ -70,7 +71,7 @@ void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
const Program* program,
int residual_id,
vector<pair<int, int> >* evaluated_jacobian_blocks) {
vector<pair<int, int>>* evaluated_jacobian_blocks) {
const ResidualBlock* residual_block =
program->residual_blocks()[residual_id];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
@@ -121,6 +122,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// seems to be the only way to construct it without doing a memory copy.
int* rows = jacobian->mutable_rows();
int* cols = jacobian->mutable_cols();
int row_pos = 0;
rows[0] = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
@@ -206,7 +208,7 @@ void CompressedRowJacobianWriter::Write(int residual_id,
program_->residual_blocks()[residual_id];
const int num_residuals = residual_block->NumResiduals();
vector<pair<int, int> > evaluated_jacobian_blocks;
vector<pair<int, int>> evaluated_jacobian_blocks;
GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
// Where in the current row does the jacobian for a parameter block begin.

View File

@@ -83,7 +83,7 @@ class CompressedRowJacobianWriter {
static void GetOrderedParameterBlocks(
const Program* program,
int residual_id,
std::vector<std::pair<int, int> >* evaluated_jacobian_blocks);
std::vector<std::pair<int, int>>* evaluated_jacobian_blocks);
// JacobianWriter interface.

View File

@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -54,9 +55,7 @@ namespace {
//
// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
RowColLessThan(const int* rows, const int* cols)
: rows(rows), cols(cols) {
}
RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}
bool operator()(const int x, const int y) const {
if (rows[x] == rows[y]) {
@@ -69,6 +68,91 @@ struct RowColLessThan {
const int* cols;
};
// Computes the transpose of the compressed row sparse matrix given by
// (rows, cols, values) into (transpose_rows, transpose_cols,
// transpose_values).
//
// rows has num_rows + 1 entries; transpose_rows must have room for
// num_cols + 1 entries. If either values or transpose_values is
// nullptr, only the sparsity structure is transposed.
//
// This is a counting-sort style transpose: count entries per column,
// turn the counts into row offsets via a cumulative sum, then scatter
// each entry into its slot in the transpose.
void TransposeForCompressedRowSparseStructure(const int num_rows,
                                              const int num_cols,
                                              const int num_nonzeros,
                                              const int* rows,
                                              const int* cols,
                                              const double* values,
                                              int* transpose_rows,
                                              int* transpose_cols,
                                              double* transpose_values) {
  // Explicitly zero out transpose_rows.
  std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);

  // Count the number of entries in each column of the original matrix
  // and assign to transpose_rows[col + 1].
  for (int idx = 0; idx < num_nonzeros; ++idx) {
    ++transpose_rows[cols[idx] + 1];
  }

  // Compute the starting position for each row in the transpose by
  // computing the cumulative sum of the entries of transpose_rows.
  for (int i = 1; i < num_cols + 1; ++i) {
    transpose_rows[i] += transpose_rows[i - 1];
  }

  // Whether the numeric values are copied too. Hoisted out of the
  // scatter loop below since it is loop invariant; nullptr instead of
  // NULL for consistency with the rest of the file.
  const bool copy_values = (values != nullptr && transpose_values != nullptr);

  // Populate transpose_cols and (optionally) transpose_values by
  // walking the entries of the source matrix. For each entry that is
  // added, the value of transpose_rows is incremented allowing us to
  // keep track of where the next entry for that row should go.
  //
  // As a result transpose_rows is shifted to the left by one entry.
  for (int r = 0; r < num_rows; ++r) {
    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
      const int c = cols[idx];
      const int transpose_idx = transpose_rows[c]++;
      transpose_cols[transpose_idx] = r;
      if (copy_values) {
        transpose_values[transpose_idx] = values[idx];
      }
    }
  }

  // This loop undoes the left shift to transpose_rows introduced by
  // the previous loop.
  for (int i = num_cols - 1; i > 0; --i) {
    transpose_rows[i] = transpose_rows[i - 1];
  }
  transpose_rows[0] = 0;
}
// Appends a dense num_rows x num_cols block of random (RandNormal)
// entries to the triplet lists (rows, cols, values). The block's
// top-left corner is (row_block_begin, col_block_begin).
void AddRandomBlock(const int num_rows,
                    const int num_cols,
                    const int row_block_begin,
                    const int col_block_begin,
                    std::vector<int>* rows,
                    std::vector<int>* cols,
                    std::vector<double>* values) {
  // Entries are emitted in row-major order so the three parallel
  // triplet vectors stay aligned.
  for (int row = 0; row < num_rows; ++row) {
    const int global_row = row_block_begin + row;
    for (int col = 0; col < num_cols; ++col) {
      rows->push_back(global_row);
      cols->push_back(col_block_begin + col);
      values->push_back(RandNormal());
    }
  }
}
// Appends a symmetric num_rows x num_rows block of random (RandNormal)
// entries to the triplet lists, anchored at (row_block_begin,
// row_block_begin). Each off-diagonal entry is mirrored so the block
// is exactly symmetric.
void AddSymmetricRandomBlock(const int num_rows,
                             const int row_block_begin,
                             std::vector<int>* rows,
                             std::vector<int>* cols,
                             std::vector<double>* values) {
  // Walk the upper triangle (including the diagonal) and emit the
  // mirrored lower-triangular entry right after each off-diagonal one.
  for (int i = 0; i < num_rows; ++i) {
    for (int j = i; j < num_rows; ++j) {
      const double entry = RandNormal();
      rows->push_back(row_block_begin + i);
      cols->push_back(row_block_begin + j);
      values->push_back(entry);
      if (i == j) {
        continue;  // Diagonal entries have no mirror image.
      }
      rows->push_back(row_block_begin + j);
      cols->push_back(row_block_begin + i);
      values->push_back(entry);
    }
  }
}
} // namespace
// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
@@ -77,69 +161,96 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
int max_num_nonzeros) {
num_rows_ = num_rows;
num_cols_ = num_cols;
storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1, 0);
cols_.resize(max_num_nonzeros, 0);
values_.resize(max_num_nonzeros, 0.0);
VLOG(1) << "# of rows: " << num_rows_
<< " # of columns: " << num_cols_
<< " max_num_nonzeros: " << cols_.size()
<< ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
cols_.size() * sizeof(int) + // NOLINT
cols_.size() * sizeof(double); // NOLINT
VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
<< " max_num_nonzeros: " << cols_.size() << ". Allocating "
<< (num_rows_ + 1) * sizeof(int) + // NOLINT
cols_.size() * sizeof(int) + // NOLINT
cols_.size() * sizeof(double); // NOLINT
}
CompressedRowSparseMatrix::CompressedRowSparseMatrix(
const TripletSparseMatrix& m) {
num_rows_ = m.num_rows();
num_cols_ = m.num_cols();
CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
const TripletSparseMatrix& input) {
return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
}
rows_.resize(num_rows_ + 1, 0);
cols_.resize(m.num_nonzeros(), 0);
values_.resize(m.max_num_nonzeros(), 0.0);
CompressedRowSparseMatrix*
CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
const TripletSparseMatrix& input) {
return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
}
// index is the list of indices into the TripletSparseMatrix m.
vector<int> index(m.num_nonzeros(), 0);
for (int i = 0; i < m.num_nonzeros(); ++i) {
CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
const TripletSparseMatrix& input, bool transpose) {
int num_rows = input.num_rows();
int num_cols = input.num_cols();
const int* rows = input.rows();
const int* cols = input.cols();
const double* values = input.values();
if (transpose) {
std::swap(num_rows, num_cols);
std::swap(rows, cols);
}
// index is the list of indices into the TripletSparseMatrix input.
vector<int> index(input.num_nonzeros(), 0);
for (int i = 0; i < input.num_nonzeros(); ++i) {
index[i] = i;
}
// Sort index such that the entries of m are ordered by row and ties
// are broken by column.
sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));
VLOG(1) << "# of rows: " << num_rows_
<< " # of columns: " << num_cols_
<< " max_num_nonzeros: " << cols_.size()
<< ". Allocating "
<< ((num_rows_ + 1) * sizeof(int) + // NOLINT
cols_.size() * sizeof(int) + // NOLINT
cols_.size() * sizeof(double)); // NOLINT
VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
<< " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
<< ((num_rows + 1) * sizeof(int) + // NOLINT
input.num_nonzeros() * sizeof(int) + // NOLINT
input.num_nonzeros() * sizeof(double)); // NOLINT
CompressedRowSparseMatrix* output =
new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());
if (num_rows == 0) {
// No data to copy.
return output;
}
// Copy the contents of the cols and values array in the order given
// by index and count the number of entries in each row.
for (int i = 0; i < m.num_nonzeros(); ++i) {
int* output_rows = output->mutable_rows();
int* output_cols = output->mutable_cols();
double* output_values = output->mutable_values();
output_rows[0] = 0;
for (int i = 0; i < index.size(); ++i) {
const int idx = index[i];
++rows_[m.rows()[idx] + 1];
cols_[i] = m.cols()[idx];
values_[i] = m.values()[idx];
++output_rows[rows[idx] + 1];
output_cols[i] = cols[idx];
output_values[i] = values[idx];
}
// Find the cumulative sum of the row counts.
for (int i = 1; i < num_rows_ + 1; ++i) {
rows_[i] += rows_[i - 1];
for (int i = 1; i < num_rows + 1; ++i) {
output_rows[i] += output_rows[i - 1];
}
CHECK_EQ(num_nonzeros(), m.num_nonzeros());
CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
return output;
}
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
int num_rows) {
CHECK_NOTNULL(diagonal);
CHECK(diagonal != nullptr);
num_rows_ = num_rows;
num_cols_ = num_rows;
storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1);
cols_.resize(num_rows);
values_.resize(num_rows);
@@ -154,47 +265,150 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
CHECK_EQ(num_nonzeros(), num_rows);
}
CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
}
CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}
void CompressedRowSparseMatrix::SetZero() {
std::fill(values_.begin(), values_.end(), 0);
}
// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
// block-aware for higher performance.
void CompressedRowSparseMatrix::RightMultiply(const double* x,
double* y) const {
CHECK_NOTNULL(x);
CHECK_NOTNULL(y);
CHECK(x != nullptr);
CHECK(y != nullptr);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
y[r] += values_[idx] * x[cols_[idx]];
if (storage_type_ == UNSYMMETRIC) {
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
const int c = cols_[idx];
const double v = values_[idx];
y[r] += v * x[c];
}
}
} else if (storage_type_ == UPPER_TRIANGULAR) {
// Because of their block structure, we will have entries that lie
// above (below) the diagonal for lower (upper) triangular matrices,
// so the loops below need to account for this.
for (int r = 0; r < num_rows_; ++r) {
int idx = rows_[r];
const int idx_end = rows_[r + 1];
// For upper triangular matrices r <= c, so skip entries with r
// > c.
while (idx < idx_end && r > cols_[idx]) {
++idx;
}
for (; idx < idx_end; ++idx) {
const int c = cols_[idx];
const double v = values_[idx];
y[r] += v * x[c];
// Since we are only iterating over the upper triangular part
// of the matrix, add contributions for the strictly lower
// triangular part.
if (r != c) {
y[c] += v * x[r];
}
}
}
} else if (storage_type_ == LOWER_TRIANGULAR) {
for (int r = 0; r < num_rows_; ++r) {
int idx = rows_[r];
const int idx_end = rows_[r + 1];
// For lower triangular matrices, we only iterate till we are r >=
// c.
for (; idx < idx_end && r >= cols_[idx]; ++idx) {
const int c = cols_[idx];
const double v = values_[idx];
y[r] += v * x[c];
// Since we are only iterating over the lower triangular part
// of the matrix, add contributions for the strictly upper
// triangular part.
if (r != c) {
y[c] += v * x[r];
}
}
}
} else {
LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
CHECK_NOTNULL(x);
CHECK_NOTNULL(y);
CHECK(x != nullptr);
CHECK(y != nullptr);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
y[cols_[idx]] += values_[idx] * x[r];
if (storage_type_ == UNSYMMETRIC) {
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
y[cols_[idx]] += values_[idx] * x[r];
}
}
} else {
// Since the matrix is symmetric, LeftMultiply = RightMultiply.
RightMultiply(x, y);
}
}
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
CHECK_NOTNULL(x);
CHECK(x != nullptr);
std::fill(x, x + num_cols_, 0.0);
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
x[cols_[idx]] += values_[idx] * values_[idx];
if (storage_type_ == UNSYMMETRIC) {
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
x[cols_[idx]] += values_[idx] * values_[idx];
}
} else if (storage_type_ == UPPER_TRIANGULAR) {
// Because of their block structure, we will have entries that lie
// above (below) the diagonal for lower (upper) triangular
// matrices, so the loops below need to account for this.
for (int r = 0; r < num_rows_; ++r) {
int idx = rows_[r];
const int idx_end = rows_[r + 1];
// For upper triangular matrices r <= c, so skip entries with r
// > c.
while (idx < idx_end && r > cols_[idx]) {
++idx;
}
for (; idx < idx_end; ++idx) {
const int c = cols_[idx];
const double v2 = values_[idx] * values_[idx];
x[c] += v2;
// Since we are only iterating over the upper triangular part
// of the matrix, add contributions for the strictly lower
// triangular part.
if (r != c) {
x[r] += v2;
}
}
}
} else if (storage_type_ == LOWER_TRIANGULAR) {
for (int r = 0; r < num_rows_; ++r) {
int idx = rows_[r];
const int idx_end = rows_[r + 1];
// For lower triangular matrices, we only iterate till we are r >=
// c.
for (; idx < idx_end && r >= cols_[idx]; ++idx) {
const int c = cols_[idx];
const double v2 = values_[idx] * values_[idx];
x[c] += v2;
// Since we are only iterating over the lower triangular part
// of the matrix, add contributions for the strictly upper
// triangular part.
if (r != c) {
x[r] += v2;
}
}
}
} else {
LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
CHECK_NOTNULL(scale);
CHECK(scale != nullptr);
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
values_[idx] *= scale[cols_[idx]];
@@ -202,7 +416,7 @@ void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
}
void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
CHECK_NOTNULL(dense_matrix);
CHECK(dense_matrix != nullptr);
dense_matrix->resize(num_rows_, num_cols_);
dense_matrix->setZero();
@@ -216,10 +430,17 @@ void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
CHECK_GE(delta_rows, 0);
CHECK_LE(delta_rows, num_rows_);
CHECK_EQ(storage_type_, UNSYMMETRIC);
num_rows_ -= delta_rows;
rows_.resize(num_rows_ + 1);
// The rest of the code updates the block information. Immediately
// return in case of no block information.
if (row_blocks_.empty()) {
return;
}
// Walk the list of row blocks until we reach the new number of rows
// and the drop the rest of the row blocks.
int num_row_blocks = 0;
@@ -233,9 +454,11 @@ void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
}
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
CHECK_EQ(storage_type_, UNSYMMETRIC);
CHECK_EQ(m.num_cols(), num_cols_);
CHECK(row_blocks_.size() == 0 || m.row_blocks().size() !=0)
CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
(!row_blocks_.empty() && !m.row_blocks().empty()))
<< "Cannot append a matrix with row blocks to one without and vice versa."
<< "This matrix has : " << row_blocks_.size() << " row blocks."
<< "The matrix being appended has: " << m.row_blocks().size()
@@ -254,9 +477,8 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
DCHECK_LT(num_nonzeros(), cols_.size());
if (m.num_nonzeros() > 0) {
std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
std::copy(m.values(),
m.values() + m.num_nonzeros(),
&values_[num_nonzeros()]);
std::copy(
m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
}
rows_.resize(num_rows_ + m.num_rows() + 1);
@@ -270,20 +492,22 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
}
num_rows_ += m.num_rows();
row_blocks_.insert(row_blocks_.end(),
m.row_blocks().begin(),
m.row_blocks().end());
// The rest of the code updates the block information. Immediately
// return in case of no block information.
if (row_blocks_.empty()) {
return;
}
row_blocks_.insert(
row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}
void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
CHECK_NOTNULL(file);
CHECK(file != nullptr);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
fprintf(file,
"% 10d % 10d %17f\n",
r,
cols_[idx],
values_[idx]);
fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
}
}
}
@@ -308,29 +532,8 @@ void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
values_.resize(num_nonzeros);
}
void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
double* solution) const {
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1] - 1; ++idx) {
solution[r] -= values_[idx] * solution[cols_[idx]];
}
solution[r] /= values_[rows_[r + 1] - 1];
}
}
void CompressedRowSparseMatrix::SolveLowerTriangularTransposeInPlace(
double* solution) const {
for (int r = num_rows_ - 1; r >= 0; --r) {
solution[r] /= values_[rows_[r + 1] - 1];
for (int idx = rows_[r + 1] - 2; idx >= rows_[r]; --idx) {
solution[cols_[idx]] -= values_[idx] * solution[r];
}
}
}
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
const double* diagonal,
const vector<int>& blocks) {
const double* diagonal, const vector<int>& blocks) {
int num_rows = 0;
int num_nonzeros = 0;
for (int i = 0; i < blocks.size(); ++i) {
@@ -373,189 +576,152 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
CompressedRowSparseMatrix* transpose =
new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
int* transpose_rows = transpose->mutable_rows();
int* transpose_cols = transpose->mutable_cols();
double* transpose_values = transpose->mutable_values();
switch (storage_type_) {
case UNSYMMETRIC:
transpose->set_storage_type(UNSYMMETRIC);
break;
case LOWER_TRIANGULAR:
transpose->set_storage_type(UPPER_TRIANGULAR);
break;
case UPPER_TRIANGULAR:
transpose->set_storage_type(LOWER_TRIANGULAR);
break;
default:
LOG(FATAL) << "Unknown storage type: " << storage_type_;
};
for (int idx = 0; idx < num_nonzeros(); ++idx) {
++transpose_rows[cols_[idx] + 1];
}
TransposeForCompressedRowSparseStructure(num_rows(),
num_cols(),
num_nonzeros(),
rows(),
cols(),
values(),
transpose->mutable_rows(),
transpose->mutable_cols(),
transpose->mutable_values());
for (int i = 1; i < transpose->num_rows() + 1; ++i) {
transpose_rows[i] += transpose_rows[i - 1];
// The rest of the code updates the block information. Immediately
// return in case of no block information.
if (row_blocks_.empty()) {
return transpose;
}
for (int r = 0; r < num_rows(); ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
const int c = cols_[idx];
const int transpose_idx = transpose_rows[c]++;
transpose_cols[transpose_idx] = r;
transpose_values[transpose_idx] = values_[idx];
}
}
for (int i = transpose->num_rows() - 1; i > 0 ; --i) {
transpose_rows[i] = transpose_rows[i - 1];
}
transpose_rows[0] = 0;
*(transpose->mutable_row_blocks()) = col_blocks_;
*(transpose->mutable_col_blocks()) = row_blocks_;
return transpose;
}
namespace {
// A ProductTerm is a term in the outer product of a matrix with
// itself.
struct ProductTerm {
ProductTerm(const int row, const int col, const int index)
: row(row), col(col), index(index) {
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
CompressedRowSparseMatrix::RandomMatrixOptions options) {
CHECK_GT(options.num_row_blocks, 0);
CHECK_GT(options.min_row_block_size, 0);
CHECK_GT(options.max_row_block_size, 0);
CHECK_LE(options.min_row_block_size, options.max_row_block_size);
if (options.storage_type == UNSYMMETRIC) {
CHECK_GT(options.num_col_blocks, 0);
CHECK_GT(options.min_col_block_size, 0);
CHECK_GT(options.max_col_block_size, 0);
CHECK_LE(options.min_col_block_size, options.max_col_block_size);
} else {
// Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
options.num_col_blocks = options.num_row_blocks;
options.min_col_block_size = options.min_row_block_size;
options.max_col_block_size = options.max_row_block_size;
}
bool operator<(const ProductTerm& right) const {
if (row == right.row) {
if (col == right.col) {
return index < right.index;
}
return col < right.col;
CHECK_GT(options.block_density, 0.0);
CHECK_LE(options.block_density, 1.0);
vector<int> row_blocks;
vector<int> col_blocks;
// Generate the row block structure.
for (int i = 0; i < options.num_row_blocks; ++i) {
// Generate a random integer in [min_row_block_size, max_row_block_size]
const int delta_block_size =
Uniform(options.max_row_block_size - options.min_row_block_size);
row_blocks.push_back(options.min_row_block_size + delta_block_size);
}
if (options.storage_type == UNSYMMETRIC) {
// Generate the col block structure.
for (int i = 0; i < options.num_col_blocks; ++i) {
// Generate a random integer in [min_col_block_size, max_col_block_size]
const int delta_block_size =
Uniform(options.max_col_block_size - options.min_col_block_size);
col_blocks.push_back(options.min_col_block_size + delta_block_size);
}
return row < right.row;
} else {
// Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
col_blocks = row_blocks;
}
int row;
int col;
int index;
};
vector<int> tsm_rows;
vector<int> tsm_cols;
vector<double> tsm_values;
CompressedRowSparseMatrix*
CompressAndFillProgram(const int num_rows,
const int num_cols,
const vector<ProductTerm>& product,
vector<int>* program) {
CHECK_GT(product.size(), 0);
// For ease of construction, we are going to generate the
// CompressedRowSparseMatrix by generating it as a
// TripletSparseMatrix and then converting it to a
// CompressedRowSparseMatrix.
// Count the number of unique product term, which in turn is the
// number of non-zeros in the outer product.
int num_nonzeros = 1;
for (int i = 1; i < product.size(); ++i) {
if (product[i].row != product[i - 1].row ||
product[i].col != product[i - 1].col) {
++num_nonzeros;
}
}
// It is possible that the random matrix is empty which is likely
// not what the user wants, so do the matrix generation till we have
// at least one non-zero entry.
while (tsm_values.empty()) {
tsm_rows.clear();
tsm_cols.clear();
tsm_values.clear();
CompressedRowSparseMatrix* matrix =
new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);
int* crsm_rows = matrix->mutable_rows();
std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
int* crsm_cols = matrix->mutable_cols();
std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);
CHECK_NOTNULL(program)->clear();
program->resize(product.size());
// Iterate over the sorted product terms. This means each row is
// filled one at a time, and we are able to assign a position in the
// values array to each term.
//
// If terms repeat, i.e., they contribute to the same entry in the
// result matrix), then they do not affect the sparsity structure of
// the result matrix.
int nnz = 0;
crsm_cols[0] = product[0].col;
crsm_rows[product[0].row + 1]++;
(*program)[product[0].index] = nnz;
for (int i = 1; i < product.size(); ++i) {
const ProductTerm& previous = product[i - 1];
const ProductTerm& current = product[i];
// Sparsity structure is updated only if the term is not a repeat.
if (previous.row != current.row || previous.col != current.col) {
crsm_cols[++nnz] = current.col;
crsm_rows[current.row + 1]++;
}
// All terms get assigned the position in the values array where
// their value is accumulated.
(*program)[current.index] = nnz;
}
for (int i = 1; i < num_rows + 1; ++i) {
crsm_rows[i] += crsm_rows[i - 1];
}
return matrix;
}
} // namespace
CompressedRowSparseMatrix*
CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
const CompressedRowSparseMatrix& m,
vector<int>* program) {
CHECK_NOTNULL(program)->clear();
CHECK_GT(m.num_nonzeros(), 0)
<< "Congratulations, "
<< "you found a bug in Ceres. Please report it.";
vector<ProductTerm> product;
const vector<int>& row_blocks = m.row_blocks();
int row_block_begin = 0;
// Iterate over row blocks
for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
const int row_block_end = row_block_begin + row_blocks[row_block];
// Compute the outer product terms for just one row per row block.
const int r = row_block_begin;
// Compute the lower triangular part of the product.
for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
product.push_back(ProductTerm(m.cols()[idx1],
m.cols()[idx2],
product.size()));
}
}
row_block_begin = row_block_end;
}
CHECK_EQ(row_block_begin, m.num_rows());
sort(product.begin(), product.end());
return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
}
void CompressedRowSparseMatrix::ComputeOuterProduct(
const CompressedRowSparseMatrix& m,
const vector<int>& program,
CompressedRowSparseMatrix* result) {
result->SetZero();
double* values = result->mutable_values();
const vector<int>& row_blocks = m.row_blocks();
int cursor = 0;
int row_block_begin = 0;
const double* m_values = m.values();
const int* m_rows = m.rows();
// Iterate over row blocks.
for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
const int row_block_end = row_block_begin + row_blocks[row_block];
const int saved_cursor = cursor;
for (int r = row_block_begin; r < row_block_end; ++r) {
// Reuse the program segment for each row in this row block.
cursor = saved_cursor;
const int row_begin = m_rows[r];
const int row_end = m_rows[r + 1];
for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
const double v1 = m_values[idx1];
for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
values[program[cursor]] += v1 * m_values[idx2];
int row_block_begin = 0;
for (int r = 0; r < options.num_row_blocks; ++r) {
int col_block_begin = 0;
for (int c = 0; c < options.num_col_blocks; ++c) {
if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
col_block_begin += col_blocks[c];
continue;
}
// Randomly determine if this block is present or not.
if (RandDouble() <= options.block_density) {
// If the matrix is symmetric, then we take care to generate
// symmetric diagonal blocks.
if (options.storage_type == UNSYMMETRIC || r != c) {
AddRandomBlock(row_blocks[r],
col_blocks[c],
row_block_begin,
col_block_begin,
&tsm_rows,
&tsm_cols,
&tsm_values);
} else {
AddSymmetricRandomBlock(row_blocks[r],
row_block_begin,
&tsm_rows,
&tsm_cols,
&tsm_values);
}
}
col_block_begin += col_blocks[c];
}
row_block_begin += row_blocks[r];
}
row_block_begin = row_block_end;
}
CHECK_EQ(row_block_begin, m.num_rows());
CHECK_EQ(cursor, program.size());
const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
const bool kDoNotTranspose = false;
CompressedRowSparseMatrix* matrix =
CompressedRowSparseMatrix::FromTripletSparseMatrix(
TripletSparseMatrix(
num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
kDoNotTranspose);
(*matrix->mutable_row_blocks()) = row_blocks;
(*matrix->mutable_col_blocks()) = col_blocks;
matrix->set_storage_type(options.storage_type);
return matrix;
}
} // namespace internal

View File

@@ -32,7 +32,6 @@
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -48,13 +47,35 @@ class TripletSparseMatrix;
class CompressedRowSparseMatrix : public SparseMatrix {
public:
// Build a matrix with the same content as the TripletSparseMatrix
// m. TripletSparseMatrix objects are easier to construct
// incrementally, so we use them to initialize SparseMatrix
// objects.
enum StorageType {
UNSYMMETRIC,
// Matrix is assumed to be symmetric but only the lower triangular
// part of the matrix is stored.
LOWER_TRIANGULAR,
// Matrix is assumed to be symmetric but only the upper triangular
// part of the matrix is stored.
UPPER_TRIANGULAR
};
// Create a matrix with the same content as the TripletSparseMatrix
// input. We assume that input does not have any repeated
// entries.
//
// We assume that m does not have any repeated entries.
explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
// The storage type of the matrix is set to UNSYMMETRIC.
//
// Caller owns the result.
static CompressedRowSparseMatrix* FromTripletSparseMatrix(
const TripletSparseMatrix& input);
// Create a matrix with the same content as the TripletSparseMatrix
// input transposed. We assume that input does not have any repeated
// entries.
//
// The storage type of the matrix is set to UNSYMMETRIC.
//
// Caller owns the result.
static CompressedRowSparseMatrix* FromTripletSparseMatrixTransposed(
const TripletSparseMatrix& input);
// Use this constructor only if you know what you are doing. This
// creates a "blank" matrix with the appropriate amount of memory
@@ -67,30 +88,30 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// manually, instead of going via the indirect route of first
// constructing a TripletSparseMatrix, which leads to more than
// double the peak memory usage.
CompressedRowSparseMatrix(int num_rows,
int num_cols,
int max_num_nonzeros);
//
// The storage type is set to UNSYMMETRIC.
CompressedRowSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
// Build a square sparse diagonal matrix with num_rows rows and
// columns. The diagonal m(i,i) = diagonal(i);
//
// The storage type is set to UNSYMMETRIC
CompressedRowSparseMatrix(const double* diagonal, int num_rows);
virtual ~CompressedRowSparseMatrix();
// SparseMatrix interface.
virtual void SetZero();
virtual void RightMultiply(const double* x, double* y) const;
virtual void LeftMultiply(const double* x, double* y) const;
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
virtual void ToTextFile(FILE* file) const;
virtual int num_rows() const { return num_rows_; }
virtual int num_cols() const { return num_cols_; }
virtual int num_nonzeros() const { return rows_[num_rows_]; }
virtual const double* values() const { return &values_[0]; }
virtual double* mutable_values() { return &values_[0]; }
virtual ~CompressedRowSparseMatrix();
void SetZero() final;
void RightMultiply(const double* x, double* y) const final;
void LeftMultiply(const double* x, double* y) const final;
void SquaredColumnNorm(double* x) const final;
void ScaleColumns(const double* scale) final;
void ToDenseMatrix(Matrix* dense_matrix) const final;
void ToTextFile(FILE* file) const final;
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return rows_[num_rows_]; }
const double* values() const final { return &values_[0]; }
double* mutable_values() final { return &values_[0]; }
// Delete the bottom delta_rows.
// num_rows -= delta_rows
@@ -102,18 +123,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
void ToCRSMatrix(CRSMatrix* matrix) const;
// Low level access methods that expose the structure of the matrix.
const int* cols() const { return &cols_[0]; }
int* mutable_cols() { return &cols_[0]; }
const int* rows() const { return &rows_[0]; }
int* mutable_rows() { return &rows_[0]; }
const std::vector<int>& row_blocks() const { return row_blocks_; }
std::vector<int>* mutable_row_blocks() { return &row_blocks_; }
const std::vector<int>& col_blocks() const { return col_blocks_; }
std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
CompressedRowSparseMatrix* Transpose() const;
// Destructive array resizing method.
void SetMaxNumNonZeros(int num_nonzeros);
@@ -122,47 +132,86 @@ class CompressedRowSparseMatrix : public SparseMatrix {
void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
void SolveLowerTriangularInPlace(double* solution) const;
void SolveLowerTriangularTransposeInPlace(double* solution) const;
// Low level access methods that expose the structure of the matrix.
const int* cols() const { return &cols_[0]; }
int* mutable_cols() { return &cols_[0]; }
CompressedRowSparseMatrix* Transpose() const;
const int* rows() const { return &rows_[0]; }
int* mutable_rows() { return &rows_[0]; }
const StorageType storage_type() const { return storage_type_; }
void set_storage_type(const StorageType storage_type) {
storage_type_ = storage_type;
}
const std::vector<int>& row_blocks() const { return row_blocks_; }
std::vector<int>* mutable_row_blocks() { return &row_blocks_; }
const std::vector<int>& col_blocks() const { return col_blocks_; }
std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
// Create a block diagonal CompressedRowSparseMatrix with the given
// block structure. The individual blocks are assumed to be laid out
// contiguously in the diagonal array, one block at a time.
//
// Caller owns the result.
static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
const double* diagonal,
const std::vector<int>& blocks);
const double* diagonal, const std::vector<int>& blocks);
// Compute the sparsity structure of the product m.transpose() * m
// and create a CompressedRowSparseMatrix corresponding to it.
// Options struct to control the generation of random block sparse
// matrices in compressed row sparse format.
//
// Also compute a "program" vector, which for every term in the
// outer product points to the entry in the values array of the
// result matrix where it should be accumulated.
// The random matrix generation proceeds as follows.
//
// This program is used by the ComputeOuterProduct function below to
// compute the outer product.
// First the row and column block structure is determined by
// generating random row and column block sizes that lie within the
// given bounds.
//
// Since the entries of the program are the same for rows with the
// same sparsity structure, the program only stores the result for
// one row per row block. The ComputeOuterProduct function reuses
// this information for each row in the row block.
static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
const CompressedRowSparseMatrix& m,
std::vector<int>* program);
// Then we walk the block structure of the resulting matrix, and with
// probability block_density detemine whether they are structurally
// zero or not. If the answer is no, then we generate entries for the
// block which are distributed normally.
struct RandomMatrixOptions {
// Type of matrix to create.
//
// If storage_type is UPPER_TRIANGULAR (LOWER_TRIANGULAR), then
// create a square symmetric matrix with just the upper triangular
// (lower triangular) part. In this case, num_col_blocks,
// min_col_block_size and max_col_block_size will be ignored and
// assumed to be equal to the corresponding row settings.
StorageType storage_type = UNSYMMETRIC;
// Compute the values array for the expression m.transpose() * m,
// where the matrix used to store the result and a program have been
// created using the CreateOuterProductMatrixAndProgram function
// above.
static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
const std::vector<int>& program,
CompressedRowSparseMatrix* result);
int num_row_blocks = 0;
int min_row_block_size = 0;
int max_row_block_size = 0;
int num_col_blocks = 0;
int min_col_block_size = 0;
int max_col_block_size = 0;
// 0 < block_density <= 1 is the probability of a block being
// present in the matrix. A given random matrix will not have
// precisely this density.
double block_density = 0.0;
};
// Create a random CompressedRowSparseMatrix whose entries are
// normally distributed and whose structure is determined by
// RandomMatrixOptions.
//
// Caller owns the result.
static CompressedRowSparseMatrix* CreateRandomMatrix(
RandomMatrixOptions options);
private:
static CompressedRowSparseMatrix* FromTripletSparseMatrix(
const TripletSparseMatrix& input, bool transpose);
int num_rows_;
int num_cols_;
std::vector<int> rows_;
std::vector<int> cols_;
std::vector<double> values_;
StorageType storage_type_;
// If the matrix has an underlying block structure, then it can also
// carry with it row and column block sizes. This is auxilliary and
@@ -171,8 +220,6 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// any way.
std::vector<int> row_blocks_;
std::vector<int> col_blocks_;
CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
} // namespace internal

View File

@@ -0,0 +1,159 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#ifndef CERES_INTERNAL_CONCURRENT_QUEUE_H_
#define CERES_INTERNAL_CONCURRENT_QUEUE_H_
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include "glog/logging.h"
namespace ceres {
namespace internal {
// A thread-safe multi-producer, multi-consumer queue for queueing items that
// are typically handled asynchronously by multiple threads. The ConcurrentQueue
// has two states which only affect the Wait call:
//
// (1) Waiters have been enabled (enabled by default or calling
// EnableWaiters). The call to Wait will block until an item is available.
// Push and pop will operate as expected.
//
// (2) StopWaiters has been called. All threads blocked in a Wait() call will
// be woken up and pop any available items from the queue. All future Wait
// requests will either return an element from the queue or return
// immediately if no element is present. Push and pop will operate as
// expected.
//
// A common use case is using the concurrent queue as an interface for
// scheduling tasks for a set of thread workers:
//
// ConcurrentQueue<Task> task_queue;
//
// [Worker threads]:
// Task task;
// while(task_queue.Wait(&task)) {
// ...
// }
//
// [Producers]:
// task_queue.Push(...);
// ..
// task_queue.Push(...);
// ...
// // Signal worker threads to stop blocking on Wait and terminate.
// task_queue.StopWaiters();
//
// Thread-safe FIFO queue. See the block comment above for the intended
// producer/consumer usage pattern.
template <typename T>
class ConcurrentQueue {
 public:
  // Waiters are enabled on construction: Wait blocks until an element
  // is available or StopWaiters is called.
  ConcurrentQueue() : waiters_enabled_(true) {}

  // Thread-safely append value and wake one thread blocked in Wait.
  void Push(const T& value) {
    std::lock_guard<std::mutex> guard(queue_mutex_);
    items_.push(value);
    new_item_cv_.notify_one();
  }

  // Non-blocking pop. Returns true and fills *value if an element was
  // available; returns false if the queue was empty.
  bool Pop(T* value) {
    CHECK(value != nullptr);

    std::lock_guard<std::mutex> guard(queue_mutex_);
    return PopLocked(value);
  }

  // Blocking pop. Sleeps until an element is available or StopWaiters
  // has been called. Returns true iff an element was popped.
  bool Wait(T* value) {
    CHECK(value != nullptr);

    std::unique_lock<std::mutex> guard(queue_mutex_);
    new_item_cv_.wait(guard,
                      [&]() { return !waiters_enabled_ || !items_.empty(); });
    return PopLocked(value);
  }

  // Wake every thread blocked in Wait; until EnableWaiters is called
  // again, Wait degrades to a non-blocking Pop.
  void StopWaiters() {
    std::lock_guard<std::mutex> guard(queue_mutex_);
    waiters_enabled_ = false;
    new_item_cv_.notify_all();
  }

  // Re-arm blocking behavior for future Wait calls.
  void EnableWaiters() {
    std::lock_guard<std::mutex> guard(queue_mutex_);
    waiters_enabled_ = true;
  }

 private:
  // Pop the front element into *value if one exists. Not thread-safe on
  // its own: the caller must hold queue_mutex_.
  bool PopLocked(T* value) {
    if (items_.empty()) {
      return false;
    }
    *value = items_.front();
    items_.pop();
    return true;
  }

  // Guards items_ and waiters_enabled_, and backs new_item_cv_, which
  // is used to park threads in Wait until work arrives.
  std::mutex queue_mutex_;
  std::condition_variable new_item_cv_;

  std::queue<T> items_;

  // When true, Wait blocks until an element arrives; cleared by
  // StopWaiters and restored by EnableWaiters.
  bool waiters_enabled_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_CONCURRENT_QUEUE_H_

View File

@@ -68,7 +68,7 @@ ConditionedCostFunction::ConditionedCostFunction(
ConditionedCostFunction::~ConditionedCostFunction() {
if (ownership_ == TAKE_OWNERSHIP) {
STLDeleteElements(&conditioners_);
STLDeleteUniqueContainerPointers(conditioners_.begin(), conditioners_.end());
} else {
wrapped_cost_function_.release();
}

View File

@@ -41,7 +41,6 @@
#include <cmath>
#include <cstddef>
#include "ceres/fpclassify.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
#include "ceres/stringprintf.h"
@@ -53,7 +52,7 @@ namespace internal {
namespace {
bool IsZeroOrInfinity(double x) {
return ((x == 0.0) || (IsInfinite(x)));
return ((x == 0.0) || std::isinf(x));
}
} // namespace
@@ -68,9 +67,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
CHECK_NOTNULL(A);
CHECK_NOTNULL(x);
CHECK_NOTNULL(b);
CHECK(A != nullptr);
CHECK(x != nullptr);
CHECK(b != nullptr);
CHECK_EQ(A->num_rows(), A->num_cols());
LinearSolver::Summary summary;
@@ -148,7 +147,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
q.setZero();
A->RightMultiply(p.data(), q.data());
const double pq = p.dot(q);
if ((pq <= 0) || IsInfinite(pq)) {
if ((pq <= 0) || std::isinf(pq)) {
summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
summary.message = StringPrintf(
"Matrix is indefinite, no more progress can be made. "
@@ -158,7 +157,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
}
const double alpha = rho / pq;
if (IsInfinite(alpha)) {
if (std::isinf(alpha)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message =
StringPrintf("Numerical failure. alpha = rho / pq = %e, "

View File

@@ -35,7 +35,6 @@
#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
#include "ceres/linear_solver.h"
#include "ceres/internal/macros.h"
namespace ceres {
namespace internal {
@@ -58,14 +57,13 @@ class LinearOperator;
class ConjugateGradientsSolver : public LinearSolver {
public:
explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
virtual Summary Solve(LinearOperator* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x);
Summary Solve(LinearOperator* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) final;
private:
const LinearSolver::Options options_;
CERES_DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
};
} // namespace internal

41
extern/ceres/internal/ceres/context.cc vendored Normal file
View File

@@ -0,0 +1,41 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#include "ceres/context.h"
#include "ceres/context_impl.h"
namespace ceres {
// Factory for the public Context interface. Hands back the concrete
// internal implementation; ownership passes to the caller.
Context* Context::Create() {
  internal::ContextImpl* context = new internal::ContextImpl();
  return context;
}
} // namespace ceres

View File

@@ -0,0 +1,43 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#include "ceres/context_impl.h"
namespace ceres {
namespace internal {
// Grow the shared thread pool so num_threads workers can be serviced
// (per the header comment, ThreadPool::Resize caps this at the number
// of hardware threads). When Ceres is built without C++ threading
// support this function is a no-op.
void ContextImpl::EnsureMinimumThreads(int num_threads) {
#ifdef CERES_USE_CXX_THREADS
  thread_pool.Resize(num_threads);
#endif // CERES_USE_CXX_THREADS
}
} // namespace internal
} // namespace ceres

View File

@@ -0,0 +1,67 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#ifndef CERES_INTERNAL_CONTEXT_IMPL_H_
#define CERES_INTERNAL_CONTEXT_IMPL_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include "ceres/context.h"
#ifdef CERES_USE_CXX_THREADS
#include "ceres/thread_pool.h"
#endif // CERES_USE_CXX_THREADS
namespace ceres {
namespace internal {
// Concrete implementation of the public Context interface. Holds
// process-wide resources (the worker thread pool, when built with C++
// threading support) that are shared across solves.
class ContextImpl : public Context {
 public:
  ContextImpl() {}
  // Non-copyable: the context owns shared resources that must not be
  // duplicated.
  ContextImpl(const ContextImpl&) = delete;
  void operator=(const ContextImpl&) = delete;

  virtual ~ContextImpl() {}

  // When compiled with C++ threading support, resize the thread pool to have
  // at min(num_thread, num_hardware_threads) where num_hardware_threads is
  // defined by the hardware. Otherwise this call is a no-op.
  void EnsureMinimumThreads(int num_threads);

#ifdef CERES_USE_CXX_THREADS
  // Shared pool of worker threads, sized on demand via
  // EnsureMinimumThreads.
  ThreadPool thread_pool;
#endif // CERES_USE_CXX_THREADS
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_CONTEXT_IMPL_H_

View File

@@ -30,16 +30,16 @@
#include "ceres/coordinate_descent_minimizer.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
#include <algorithm>
#include <iterator>
#include <memory>
#include <numeric>
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
#include "ceres/parallel_for.h"
#include "ceres/parameter_block.h"
#include "ceres/parameter_block_ordering.h"
#include "ceres/problem_impl.h"
@@ -59,6 +59,11 @@ using std::set;
using std::string;
using std::vector;
CoordinateDescentMinimizer::CoordinateDescentMinimizer(ContextImpl* context)
: context_(context) {
CHECK(context_ != nullptr);
}
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
}
@@ -74,19 +79,16 @@ bool CoordinateDescentMinimizer::Init(
// Serialize the OrderedGroups into a vector of parameter block
// offsets for parallel access.
map<ParameterBlock*, int> parameter_block_index;
map<int, set<double*> > group_to_elements = ordering.group_to_elements();
for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
it != group_to_elements.end();
++it) {
for (set<double*>::const_iterator ptr_it = it->second.begin();
ptr_it != it->second.end();
++ptr_it) {
parameter_blocks_.push_back(parameter_map.find(*ptr_it)->second);
map<int, set<double*>> group_to_elements = ordering.group_to_elements();
for (const auto& g_t_e : group_to_elements) {
const auto& elements = g_t_e.second;
for (double* parameter_block: elements) {
parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
parameter_block_index[parameter_blocks_.back()] =
parameter_blocks_.size() - 1;
}
independent_set_offsets_.push_back(
independent_set_offsets_.back() + it->second.size());
independent_set_offsets_.back() + elements.size());
}
// The ordering does not have to contain all parameter blocks, so
@@ -109,8 +111,7 @@ bool CoordinateDescentMinimizer::Init(
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
const map<ParameterBlock*, int>::const_iterator it =
parameter_block_index.find(parameter_block);
const auto it = parameter_block_index.find(parameter_block);
if (it != parameter_block_index.end()) {
residual_blocks_[it->second].push_back(residual_block);
}
@@ -120,6 +121,7 @@ bool CoordinateDescentMinimizer::Init(
evaluator_options_.linear_solver_type = DENSE_QR;
evaluator_options_.num_eliminate_blocks = 0;
evaluator_options_.num_threads = 1;
evaluator_options_.context = context_;
return true;
}
@@ -135,11 +137,12 @@ void CoordinateDescentMinimizer::Minimize(
parameter_block->SetConstant();
}
scoped_array<LinearSolver*> linear_solvers(
std::unique_ptr<LinearSolver*[]> linear_solvers(
new LinearSolver*[options.num_threads]);
LinearSolver::Options linear_solver_options;
linear_solver_options.type = DENSE_QR;
linear_solver_options.context = context_;
for (int i = 0; i < options.num_threads; ++i) {
linear_solvers[i] = LinearSolver::Create(linear_solver_options);
@@ -148,13 +151,11 @@ void CoordinateDescentMinimizer::Minimize(
for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) {
const int num_problems =
independent_set_offsets_[i + 1] - independent_set_offsets_[i];
// No point paying the price for an OpemMP call if the set is of
// size zero.
// Avoid parallelization overhead call if the set is empty.
if (num_problems == 0) {
continue;
}
#ifdef CERES_USE_OPENMP
const int num_inner_iteration_threads =
min(options.num_threads, num_problems);
evaluator_options_.num_threads =
@@ -162,47 +163,43 @@ void CoordinateDescentMinimizer::Minimize(
// The parameter blocks in each independent set can be optimized
// in parallel, since they do not co-occur in any residual block.
#pragma omp parallel for num_threads(num_inner_iteration_threads)
#endif
for (int j = independent_set_offsets_[i];
j < independent_set_offsets_[i + 1];
++j) {
#ifdef CERES_USE_OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
ParallelFor(
context_,
independent_set_offsets_[i],
independent_set_offsets_[i + 1],
num_inner_iteration_threads,
[&](int thread_id, int j) {
ParameterBlock* parameter_block = parameter_blocks_[j];
const int old_index = parameter_block->index();
const int old_delta_offset = parameter_block->delta_offset();
parameter_block->SetVarying();
parameter_block->set_index(0);
parameter_block->set_delta_offset(0);
ParameterBlock* parameter_block = parameter_blocks_[j];
const int old_index = parameter_block->index();
const int old_delta_offset = parameter_block->delta_offset();
parameter_block->SetVarying();
parameter_block->set_index(0);
parameter_block->set_delta_offset(0);
Program inner_program;
inner_program.mutable_parameter_blocks()->push_back(parameter_block);
*inner_program.mutable_residual_blocks() = residual_blocks_[j];
Program inner_program;
inner_program.mutable_parameter_blocks()->push_back(parameter_block);
*inner_program.mutable_residual_blocks() = residual_blocks_[j];
// TODO(sameeragarwal): Better error handling. Right now we
// assume that this is not going to lead to problems of any
// sort. Basically we should be checking for numerical failure
// of some sort.
//
// On the other hand, if the optimization is a failure, that in
// some ways is fine, since it won't change the parameters and
// we are fine.
Solver::Summary inner_summary;
Solve(&inner_program,
linear_solvers[thread_id],
parameters + parameter_block->state_offset(),
&inner_summary);
// TODO(sameeragarwal): Better error handling. Right now we
// assume that this is not going to lead to problems of any
// sort. Basically we should be checking for numerical failure
// of some sort.
//
// On the other hand, if the optimization is a failure, that in
// some ways is fine, since it won't change the parameters and
// we are fine.
Solver::Summary inner_summary;
Solve(&inner_program,
linear_solvers[thread_id],
parameters + parameter_block->state_offset(),
&inner_summary);
parameter_block->set_index(old_index);
parameter_block->set_delta_offset(old_delta_offset);
parameter_block->SetState(parameters + parameter_block->state_offset());
parameter_block->SetConstant();
}
parameter_block->set_index(old_index);
parameter_block->set_delta_offset(old_delta_offset);
parameter_block->SetState(parameters +
parameter_block->state_offset());
parameter_block->SetConstant();
});
}
for (int i = 0; i < parameter_blocks_.size(); ++i) {
@@ -227,14 +224,17 @@ void CoordinateDescentMinimizer::Solve(Program* program,
Minimizer::Options minimizer_options;
minimizer_options.evaluator.reset(
CHECK_NOTNULL(Evaluator::Create(evaluator_options_, program, &error)));
Evaluator::Create(evaluator_options_, program, &error));
CHECK(minimizer_options.evaluator != nullptr);
minimizer_options.jacobian.reset(
CHECK_NOTNULL(minimizer_options.evaluator->CreateJacobian()));
minimizer_options.evaluator->CreateJacobian());
CHECK(minimizer_options.jacobian != nullptr);
TrustRegionStrategy::Options trs_options;
trs_options.linear_solver = linear_solver;
minimizer_options.trust_region_strategy.reset(
CHECK_NOTNULL(TrustRegionStrategy::Create(trs_options)));
TrustRegionStrategy::Create(trs_options));
CHECK(minimizer_options.trust_region_strategy != nullptr);
minimizer_options.is_silent = true;
TrustRegionMinimizer minimizer;
@@ -245,17 +245,16 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
const Program& program,
const ParameterBlockOrdering& ordering,
string* message) {
const map<int, set<double*> >& group_to_elements =
const map<int, set<double*>>& group_to_elements =
ordering.group_to_elements();
// Verify that each group is an independent set
map<int, set<double*> >::const_iterator it = group_to_elements.begin();
for (; it != group_to_elements.end(); ++it) {
if (!program.IsParameterBlockSetIndependent(it->second)) {
for (const auto& g_t_e : group_to_elements) {
if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
*message =
StringPrintf("The user-provided "
"parameter_blocks_for_inner_iterations does not "
"form an independent set. Group Id: %d", it->first);
"form an independent set. Group Id: %d", g_t_e.first);
return false;
}
}
@@ -268,7 +267,7 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
// points.
ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
const Program& program) {
scoped_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
std::unique_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
ComputeRecursiveIndependentSetOrdering(program, ordering.get());
ordering->Reverse();
return ordering.release();

View File

@@ -34,6 +34,7 @@
#include <string>
#include <vector>
#include "ceres/context_impl.h"
#include "ceres/evaluator.h"
#include "ceres/minimizer.h"
#include "ceres/problem_impl.h"
@@ -57,6 +58,8 @@ class LinearSolver;
// program are constant.
class CoordinateDescentMinimizer : public Minimizer {
public:
explicit CoordinateDescentMinimizer(ContextImpl* context);
bool Init(const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
@@ -64,9 +67,10 @@ class CoordinateDescentMinimizer : public Minimizer {
// Minimizer interface.
virtual ~CoordinateDescentMinimizer();
virtual void Minimize(const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary);
void Minimize(const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary) final;
// Verify that each group in the ordering forms an independent set.
static bool IsOrderingValid(const Program& program,
@@ -86,7 +90,7 @@ class CoordinateDescentMinimizer : public Minimizer {
Solver::Summary* summary);
std::vector<ParameterBlock*> parameter_blocks_;
std::vector<std::vector<ResidualBlock*> > residual_blocks_;
std::vector<std::vector<ResidualBlock*>> residual_blocks_;
// The optimization is performed in rounds. In each round all the
// parameter blocks that form one independent set are optimized in
// parallel. This array, marks the boundaries of the independent
@@ -94,6 +98,8 @@ class CoordinateDescentMinimizer : public Minimizer {
std::vector<int> independent_set_offsets_;
Evaluator::Options evaluator_options_;
ContextImpl* context_;
};
} // namespace internal

Some files were not shown because too many files have changed in this diff Show More