Update Ceres to latest upstream version

Brings new bounds limiting and also prepares the build system
for upcoming changes in the upstream.

Namely, the shared_ptr header and namespace are now detected
by the build system rather than by hacks in the code.

This commit also includes some changes to the auto-detection
flags in SCons, which should make them more consistent. These
are the main changes that are supposed to be reviewed here.

Reviewers: campbellbarton

Differential Revision: https://developer.blender.org/D581
Sergey Sharybin
2014-05-02 05:52:56 +06:00
parent 0a0e4e0e69
commit 72ac596e19
134 changed files with 3118 additions and 1499 deletions
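
For reviewers unfamiliar with the "bounds limiting" mentioned above: it refers to the parameter bound constraints added in this Ceres version. Below is a minimal, illustrative sketch (not code from this commit) of how a caller would use them, assuming the Problem::SetParameterLowerBound()/SetParameterUpperBound() API documented for this upstream release; the toy residual and variable names are hypothetical.

// Illustrative sketch only; assumes the Ceres bound-constraints API
// (Problem::SetParameterLowerBound / SetParameterUpperBound) from this update.
#include <ceres/ceres.h>

// Hypothetical toy residual: f(x) = 10 - x, minimized with x kept in [0, 5].
struct Residual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

int main() {
  double x = 0.5;
  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<Residual, 1, 1>(new Residual), NULL, &x);

  // The new part: box constraints on the parameter block.
  problem.SetParameterLowerBound(&x, 0, 0.0);
  problem.SetParameterUpperBound(&x, 0, 5.0);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}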

View File

@@ -40,11 +40,13 @@ import string
import shutil
import re
# store path to tools
# store path to tools and modules
toolpath=os.path.join(".", "build_files", "scons", "tools")
modulespath=os.path.join(".", "build_files", "scons", "Modules")
# needed for importing tools
# needed for importing tools and modules
sys.path.append(toolpath)
sys.path.append(modulespath)
import Blender
import btools
@@ -176,6 +178,16 @@ if crossbuild and platform not in ('win32-vc', 'win64-vc'):
env['OURPLATFORM'] = platform
# Put all auto configuration run-time tests here
from FindSharedPtr import FindSharedPtr
from FindUnorderedMap import FindUnorderedMap
conf = Configure(env)
FindSharedPtr(conf)
FindUnorderedMap(conf)
env = conf.Finish()
configfile = os.path.join("build_files", "scons", "config", platform + "-config.py")
if os.path.exists(configfile):

View File

@@ -791,6 +791,78 @@ macro(TEST_UNORDERED_MAP_SUPPORT)
endif()
endmacro()
macro(TEST_SHARED_PTR_SUPPORT)
# These checks come from the Ceres library.
#
# Find shared pointer header and namespace.
#
# This module defines the following variables:
#
# SHARED_PTR_FOUND: TRUE if shared_ptr found.
# SHARED_PTR_TR1_MEMORY_HEADER: True if <tr1/memory> header is to be used
# for the shared_ptr object, otherwise use <memory>.
# SHARED_PTR_TR1_NAMESPACE: TRUE if shared_ptr is defined in std::tr1 namespace,
# otherwise it's assumed to be defined in std namespace.
include(CheckIncludeFileCXX)
set(SHARED_PTR_FOUND FALSE)
CHECK_INCLUDE_FILE_CXX(memory HAVE_STD_MEMORY_HEADER)
if(HAVE_STD_MEMORY_HEADER)
# Finding the memory header doesn't mean that shared_ptr is in std
# namespace.
#
# In particular, MSVC 2008 has shared_ptr declared in std::tr1. In
# order to support this, we do an extra check to see which namespace
# should be used.
include(CheckCXXSourceCompiles)
CHECK_CXX_SOURCE_COMPILES("#include <memory>
int main() {
std::shared_ptr<int> int_ptr;
return 0;
}"
HAVE_SHARED_PTR_IN_STD_NAMESPACE)
if(HAVE_SHARED_PTR_IN_STD_NAMESPACE)
message("-- Found shared_ptr in std namespace using <memory> header.")
set(SHARED_PTR_FOUND TRUE)
else()
CHECK_CXX_SOURCE_COMPILES("#include <memory>
int main() {
std::tr1::shared_ptr<int> int_ptr;
return 0;
}"
HAVE_SHARED_PTR_IN_TR1_NAMESPACE)
if(HAVE_SHARED_PTR_IN_TR1_NAMESPACE)
message("-- Found shared_ptr in std::tr1 namespace using <memory> header.")
set(SHARED_PTR_TR1_NAMESPACE TRUE)
set(SHARED_PTR_FOUND TRUE)
endif()
endif()
endif()
if(NOT SHARED_PTR_FOUND)
# Further, gcc defines shared_ptr in std::tr1 namespace and
# <tr1/memory> is to be included for this. And what makes things
# even more tricky is that gcc does have <memory> header, so
# all the checks above wouldn't find shared_ptr.
CHECK_INCLUDE_FILE_CXX("tr1/memory" HAVE_TR1_MEMORY_HEADER)
if(HAVE_TR1_MEMORY_HEADER)
CHECK_CXX_SOURCE_COMPILES("#include <tr1/memory>
int main() {
std::tr1::shared_ptr<int> int_ptr;
return 0;
}"
HAVE_SHARED_PTR_IN_TR1_NAMESPACE_FROM_TR1_MEMORY_HEADER)
if(HAVE_SHARED_PTR_IN_TR1_NAMESPACE_FROM_TR1_MEMORY_HEADER)
message("-- Found shared_ptr in std::tr1 namespace using <tr1/memory> header.")
set(SHARED_PTR_TR1_MEMORY_HEADER TRUE)
set(SHARED_PTR_TR1_NAMESPACE TRUE)
set(SHARED_PTR_FOUND TRUE)
endif()
endif()
endif()
endmacro()
# when we have warnings as errors applied globally this
# needs to be removed for some external libs which we don't maintain.

View File

@@ -0,0 +1,42 @@
def FindSharedPtr(conf):
"""
Detect shared_ptr availability
"""
found = False
namespace = None
header = None
if conf.CheckCXXHeader("memory"):
# Finding the memory header doesn't mean that shared_ptr is in std
# namespace.
#
# In particular, MSVC 2008 has shared_ptr declared in std::tr1. In
# order to support this, we do an extra check to see which namespace
# should be used.
if conf.CheckType('std::shared_ptr<int>', language = 'CXX', includes="#include <memory>"):
print("-- Found shared_ptr in std namespace using <memory> header.")
namespace = 'std'
header = 'memory'
elif conf.CheckType('std::tr1::shared_ptr<int>', language = 'CXX', includes="#include <memory>"):
print("-- Found shared_ptr in std::tr1 namespace using <memory> header..")
namespace = 'std::tr1'
header = 'memory'
if not namespace and conf.CheckCXXHeader("tr1/memory"):
# Further, gcc defines shared_ptr in std::tr1 namespace and
# <tr1/memory> is to be included for this. And what makes things
# even more tricky is that gcc does have <memory> header, so
# all the checks above wouldn't find shared_ptr.
if conf.CheckType('std::tr1::shared_ptr<int>', language = 'CXX', includes="#include <tr1/memory>"):
print("-- Found shared_ptr in std::tr1 namespace using <tr1/memory> header..")
namespace = 'std::tr1'
header = 'tr1/memory'
if not namespace:
print("-- Unable to find shared_ptrred_map>.")
conf.env['WITH_SHARED_PTR_SUPPORT'] = namespace and header
conf.env['SHARED_PTR_NAMESPACE'] = namespace
conf.env['SHARED_PTR_HEADER'] = header

View File

@@ -1,10 +1,11 @@
def test_unordered_map(conf):
def FindUnorderedMap(conf):
"""
Detect unordered_map availability
Returns (True/False, namespace, include prefix)
"""
namespace = None
header = None
if conf.CheckCXXHeader("unordered_map"):
# Even so we've found unordered_map header file it doesn't
# mean unordered_map and unordered_set will be declared in
@@ -17,16 +18,21 @@ def test_unordered_map(conf):
if conf.CheckType('std::unordered_map<int, int>', language = 'CXX', includes="#include <unordered_map>"):
print("-- Found unordered_map/set in std namespace.")
return True, 'std', ''
namespace = 'std'
header = 'unordered_map'
elif conf.CheckType('std::tr1::unordered_map<int, int>', language = 'CXX', includes="#include <unordered_map>"):
print("-- Found unordered_map/set in std::tr1 namespace.")
return True, 'std::tr1', ''
namespace = 'std::tr1'
header = 'unordered_map'
else:
print("-- Found <unordered_map> but can not find neither std::unordered_map nor std::tr1::unordered_map.")
return False, '', ''
elif conf.CheckCXXHeader("tr1/unordered_map"):
print("-- Found unordered_map/set in std::tr1 namespace.")
return True, 'std::tr1', 'tr1/'
namespace = 'std::tr1'
header = 'tr1/unordered_map'
else:
print("-- Unable to find <unordered_map> or <tr1/unordered_map>. ")
return False, '', ''
conf.env['WITH_UNORDERED_MAP_SUPPORT'] = namespace and header
conf.env['UNORDERED_MAP_NAMESPACE'] = namespace
conf.env['UNORDERED_MAP_HEADER'] = header

View File

@@ -1,4 +1,4 @@
from Modules.FindPython import FindPython
from FindPython import FindPython
py = FindPython()

View File

@@ -46,10 +46,23 @@ if(WITH_LIBMV)
-DLIBMV_NO_FAST_DETECTOR=
)
TEST_SHARED_PTR_SUPPORT()
if(SHARED_PTR_FOUND)
if(SHARED_PTR_TR1_MEMORY_HEADER)
add_definitions(-DCERES_TR1_MEMORY_HEADER)
endif()
if(SHARED_PTR_TR1_NAMESPACE)
add_definitions(-DCERES_TR1_SHARED_PTR)
endif()
else()
message(FATAL_ERROR "Unable to find shared_ptr.")
endif()
list(APPEND INC
third_party/gflags
third_party/glog/src
third_party/ceres/include
third_party/ceres/config
../../intern/guardedalloc
)

View File

@@ -6,6 +6,7 @@
import sys
import os
from FindSharedPtr import FindSharedPtr
Import('env')
@@ -13,6 +14,15 @@ defs = []
incs = '.'
if env['WITH_BF_LIBMV']:
if not env['WITH_SHARED_PTR_SUPPORT']:
print("-- Unable to find shared_ptr which is required for compilation.")
exit(1)
if env['SHARED_PTR_HEADER'] == 'tr1/memory':
defs.append('CERES_TR1_MEMORY_HEADER')
if env['SHARED_PTR_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_SHARED_PTR')
defs.append('GOOGLE_GLOG_DLL_DECL=')
defs.append('WITH_LIBMV')
defs.append('WITH_LIBMV_GUARDED_ALLOC')
@@ -27,7 +37,7 @@ if env['WITH_BF_LIBMV']:
src += env.Glob('libmv/tracking/*.cc')
src += env.Glob('third_party/gflags/*.cc')
incs += ' ../Eigen3 third_party/gflags third_party/glog/src third_party/ceres/include ../../intern/guardedalloc'
incs += ' ../Eigen3 third_party/gflags third_party/glog/src third_party/ceres/include third_party/ceres/config ../../intern/guardedalloc'
incs += ' ' + env['BF_PNG_INC']
incs += ' ' + env['BF_ZLIB_INC']
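
As context for the CERES_TR1_MEMORY_HEADER / CERES_TR1_SHARED_PTR defines appended above: the ChangeLog bundled later in this commit describes them as selecting the shared_ptr header and namespace. The sketch below shows the kind of guard such defines typically drive on the C++ side; it is illustrative only and not a file from this commit.

// Illustrative sketch, not an actual file from this commit: picking the
// shared_ptr header and namespace from the build-system defines.
#if defined(CERES_TR1_MEMORY_HEADER)
#  include <tr1/memory>   // e.g. gcc with shared_ptr only in TR1
#else
#  include <memory>       // standard header
#endif

#if defined(CERES_TR1_SHARED_PTR)
using std::tr1::shared_ptr;  // e.g. MSVC 2008, older gcc
#else
using std::shared_ptr;       // modern toolchains
#endif

int main() {
  shared_ptr<int> value(new int(42));  // compiles under either configuration
  return (*value == 42) ? 0 : 1;
}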

View File

@@ -139,10 +139,23 @@ if(WITH_LIBMV)
-DLIBMV_NO_FAST_DETECTOR=
)
TEST_SHARED_PTR_SUPPORT()
if(SHARED_PTR_FOUND)
if(SHARED_PTR_TR1_MEMORY_HEADER)
add_definitions(-DCERES_TR1_MEMORY_HEADER)
endif()
if(SHARED_PTR_TR1_NAMESPACE)
add_definitions(-DCERES_TR1_SHARED_PTR)
endif()
else()
message(FATAL_ERROR "Unable to find shared_ptr.")
endif()
list(APPEND INC
third_party/gflags
third_party/glog/src
third_party/ceres/include
third_party/ceres/config
../../intern/guardedalloc
)
@@ -264,6 +277,15 @@ defs = []
incs = '.'
if env['WITH_BF_LIBMV']:
if not env['WITH_SHARED_PTR_SUPPORT']:
print("-- Unable to find shared_ptr which is required for compilation.")
exit(1)
if env['SHARED_PTR_HEADER'] == 'tr1/memory':
defs.append('CERES_TR1_MEMORY_HEADER')
if env['SHARED_PTR_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_SHARED_PTR')
defs.append('GOOGLE_GLOG_DLL_DECL=')
defs.append('WITH_LIBMV')
defs.append('WITH_LIBMV_GUARDED_ALLOC')
@@ -272,7 +294,7 @@ if env['WITH_BF_LIBMV']:
src = env.Glob('*.cc')
incs += ' ../Eigen3 third_party/gflags third_party/glog/src third_party/ceres/include ../../intern/guardedalloc'
incs += ' ../Eigen3 third_party/gflags third_party/glog/src third_party/ceres/include third_party/ceres/config ../../intern/guardedalloc'
incs += ' ' + env['BF_PNG_INC']
incs += ' ' + env['BF_ZLIB_INC']

View File

@@ -30,6 +30,7 @@ set(INC
.
include
internal
config
../gflags
../../
)
@@ -44,7 +45,6 @@ set(SRC
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_random_access_crs_matrix.cc
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_matrix.cc
@@ -69,6 +69,8 @@ set(SRC
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
internal/ceres/dogleg_strategy.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -159,7 +161,6 @@ set(SRC
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_crs_matrix.h
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.h
@@ -185,6 +186,9 @@ set(SRC
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.h
@@ -253,6 +257,8 @@ set(SRC
# internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
# internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
# internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
# internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
# internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
# internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
# internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
# internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -269,6 +275,8 @@ set(SRC
# internal/ceres/generated/schur_eliminator_2_3_d.cc
# internal/ceres/generated/schur_eliminator_2_4_3.cc
# internal/ceres/generated/schur_eliminator_2_4_4.cc
# internal/ceres/generated/schur_eliminator_2_4_8.cc
# internal/ceres/generated/schur_eliminator_2_4_9.cc
# internal/ceres/generated/schur_eliminator_2_4_d.cc
# internal/ceres/generated/schur_eliminator_2_d_d.cc
# internal/ceres/generated/schur_eliminator_4_4_2.cc

View File

@@ -1,687 +1,646 @@
commit 15bf0d5018dfe432ef67e726b248efcf3b58bc4f
commit 8c62487e437b91d3d354cd1ae8957e43fe540732
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri May 16 09:44:18 2014 -0700
Preparations for 1.9.0 release.
Version bump.
minor docs update.
Change-Id: I2fbe20ba4af6b2e186fe244c96ce6d6464fe0469
commit 0831275a78ab65e4c95979598cb35c54d03d3185
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri May 16 08:17:54 2014 -0700
Documentation update.
1. Update iOS build instructions.
2. Update version history.
Change-Id: I49d62e86ecff39190b50c050cb12eef4e2773357
commit c7c7458625996a20203f1366d11bd701e5fb621b
Author: Jack Feng <jackfengji@gmail.com>
Date: Mon May 12 10:23:56 2014 +0800
add support for building for ios
use ios-cmake to build for ios
Change-Id: I6b17c33339f3121322a4004d79629b22a62f7a94
commit 36c2ce87d13b9b7123bd0473b8b45fb3b6ae4271
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Jan 13 20:38:28 2014 +0600
Date: Mon Jan 13 21:18:08 2014 +0600
Code cleanup: fix no previous declaration warnings
Libmv 2D homography estimation example application
Move internally used functions to an anonymous namespace,
Add an example application of homography matrix estimation
from a 2D euclidean correspondences which is done in two
steps:
Change-Id: I84c98facf901e64771d131b088e20e2c033cab70
- Coarse algebraic estimation
- Fine refinement using Ceres minimizer
Nothing terribly exciting apart from an example of how to
use user callbacks.
User callback is used here to stop minimizer when average
of symmetric geometric distance becomes good enough.
This might be arguable whether it's the best way to go
(in some cases you would want to stop minimizer when
maximal symmetric distance is lower than a threshold) but
for a callback usage example it's good enough to stick
to current logic.
Change-Id: I60c8559cb10b001a0eb64ab71920c08bd68455b8
commit 80a53eebfd28bfc032cedbf7852d5c56eb1d5af5
commit d99a3a961e4a6ff7218d0ab749da57cf1a1677bd
Author: Björn Piltz <bjornpiltz@gmail.com>
Date: Wed May 7 14:59:12 2014 +0200
Separate PUBLIC and PRIVATE library dependencies
Do not propagate 3d party libs through
IMPORTED_LINK_INTERFACE_LIBRARIES_[DEBUG/RELEASE] mechanism
when building shared libraries. SuiteSparse, lapack & co
are considered private. Glog still gets propagated since
it is part of the public interface. See documentation of
TARGET_LINK_LIBRARIES().
Change-Id: If0563b0c705b102876f5190e9a86694d10f79283
commit 1c089e8453583876f417b214f76a5863d7694986
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Jan 9 12:40:54 2014 -0800
Date: Tue May 13 16:58:25 2014 -0700
Faster LBFGS.
Notational fix in modeling.rst by William Rucklidge.
1. Use column major storage for the various matrices used by
LowRankInverseHessian. Since all the operations are on columns.
2. Use a circular buffer to keep track of history of the LBFGS updates
so that an update does not require copying the entire history. This
makes the updates O(1) rather than O(rank).
The implementation has been checked against the denoising code
where it gives numerically identical results. The overhead of the
LBFGS code is now near negligible as compared to the gradient evaluation.
On a sample problem
before 1050ms after: 630ms
Change-Id: I537ba506ac35fc4960b304c10d923a8dea2ae031
Change-Id: Iffa127541380fcc32da13fe4ac474692e1e3d0ec
commit f55780063620e7a3dcfe7e018d6488bf6a5b29da
commit 082d9e2a1b43b26a81157a6c711de0ff34c40ba4
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Jan 8 10:43:31 2014 -0800
Date: Mon May 12 20:41:27 2014 -0700
Reduce logging verbosity.
Add iOS.cmake from the ios-cmake project.
When user specifies Solver::Options::logging_type = SILENT,
ensure that the minimizer does not log anything.
URL: https://github.com/cristeab/ios-cmake
Commit: 86dc085f0d5ed955cd58e2657cc3efc7c1aabbc8
Change-Id: I94e34dae504881ab36d4a66e6adb7a19a227363e
Change-Id: I8fe6023d4cb6655b5a724e8b695fdae87ce3b685
commit 85561eee951c91e578984c6d3eecf0073acabb64
commit a97056c9752fe7223c8560da58862ecb1fd241ad
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Jan 7 22:22:14 2014 -0800
Date: Tue May 13 16:45:36 2014 -0700
Use int32 for parameter block sizes.
Various documentation fixes from William Rucklidge.
CostFunction now uses int32 instead of int16
to store the size of its parameter blocks.
This is an API breaking change.
Change-Id: I032ea583bc7ea4b3009be25d23a3be143749c73e
Change-Id: I102e98f41f4b5fe2a84d1224d5ed7517fdfdb022
commit a7fda3317b1a97702750bea96ac3ef3d1a2afb49
commit 2f8fb218f0a08102231ace07ef02b34b4aad7336
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Mon Jan 6 10:25:42 2014 +0000
Date: Tue May 13 20:57:39 2014 +0100
Fix typos in error messages in line search config checks.
Adding function to update CMake cache variables and preserve help.
Change-Id: I3ae2ae58328e996598e3e32c12869d2b10109ef7
- Previously we were replicating the same two lines to update a cache
variable whilst preserving its help string.
- This commit adds a function which wraps up this common operation into
a single line.
Change-Id: Ic78a5adf5d59262bbbcec1e353ded7620391e862
commit f695322eb8c5ff118f0d27f68d46d557338e5db1
commit 8f4dcb25f1be74a8c12c0f9eeb67b6b0755563f5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sat Jan 4 14:28:23 2014 -0800
Date: Tue Apr 29 21:40:57 2014 -0700
Remove a compilation warning on windows.
Documentation update.
Only define NOMINMAX if it is not already defined.
Update modeling.rst and solving.rst to reflect
changes to the API.
Thanks to Pierre Moulon for this fix.
Change-Id: Ia5dc0f5ff2afe10e4c7e97a57f54297d82052b21
Change-Id: Id1a8adfed1486f08e5fd67c5af2d29708a26490c
commit b811041d78d80518db153ef3030bcbdbaf80df8d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Jan 2 15:19:17 2014 +0600
commit d48e7050225730f61eaef851def5b43bc439e991
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sat May 10 08:58:58 2014 +0100
Code cleanup: fix no previous declaration warnings
Configure config.h and include it from the build directory.
Moved some internally used functions into an anonymous namespace.
- Previously we overwrote the default (empty) config.h in the source
tree with a configured config.h, generated using the current compile
options.
- This was undesirable as it could lead to inadvertant commits of the
generated config.h.
Change-Id: Ie82df61b0608abac79ccc9f7b14e7f7e04ab733d
- This patch moves the default config.h to <src>/config/ceres/internal,
separate from the other headers, thus if Ceres is compiled without
CMake this directory will now also have to be included. This
directory is _not_ added to the CMake include directories for Ceres
(thus the default config.h is never used when compiling with CMake).
- When using CMake, the generated config.h is now placed in
<build>/config/ceres/internal, which is in turn added to the include
directories for Ceres when it is compiled, and the resulting config.h
is copied to ceres/internal when installed.
Change-Id: Ib1ba45e66e383ade2ebb08603af9165c1df616f2
commit f14f6bf9b7d3fbd2cab939cf4ad615b317e93c83
commit 11c496164ffe9809306945c2b81276efcd51533d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Dec 26 09:50:45 2013 -0800
Date: Fri May 9 16:27:03 2014 -0700
Speed up SPARSE_NORMAL_CHOLESKY when using CX_SPARSE.
Fix 80cols violation in local_parameterization.h
When using sparse cholesky factorization to solve the linear
least squares problem:
Ax = b
There are two sources of computational complexity.
1. Computing H = A'A
2. Computing the sparse Cholesky factorization of H.
Doing 1. using CX_SPARSE is particularly expensive, as it uses
a generic cs_multiply function which computes the structure of
the matrix H everytime, reallocates memory and does not take
advantage of the fact that the matrix being computed is a symmetric
outer product.
This change adds a custom symmetric outer product algorithm for
CompressedRowSparseMatrix.
It has a symbolic phase, where it computes the sparsity structure
of the output matrix and a "program" which allows the actual
multiplication routine to determine exactly which entry in the
values array each term in the product contributes to.
With these two bits of information, the outer product H = A'A
can be computed extremely fast without any reasoning about
the structure of H.
Further gains in efficiency are made by exploiting the block
structure of A.
With this change, SPARSE_NORMAL_CHOLESKY with CX_SPARSE as the
backend results in > 300% speedup for some problems.
The symbolic analysis phase of the solver is a bit more expensive
now but the increased cost is made up in 3-4 iterations.
Change-Id: I5e4a72b4d03ba41b378a2634330bc22b299c0f12
Change-Id: I07f59baa9e4aba7c5ae028f0c144ea9ad153d49a
commit d79f886eb87cb064e19eb12c1ad3d45bbed92198
commit af3154422b63b7792ecd23b00ca1a0c003764dae
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Dec 30 07:39:10 2013 -0800
Date: Fri May 9 05:45:03 2014 -0700
Refactor line search error checking code.
Add Alex Stewart as a maintainer.
Move the error checking code into its own function
so that it can be used in upcoming changes.
Update contributing.rst to mention Alex
as one of the people who can be added
as a reviewer.
Change-Id: Icf348e5a8bbe8f8b663f04fb8cfc9a2149b12f22
Change-Id: I30ff3e635e8c419e11e8f20394aaea5f284a10d5
commit 2b16b0080b6e673eaaf9ed478c9e971d9fcd65de
commit ea765850685f1ff0431da5212656378fc20d3673
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Wed May 7 20:46:17 2014 +0100
Adding autogenerated Ceres config.h to #define Ceres compile options.
- Previously we passed all compile options to Ceres via add_definitions
in CMake. This was fine for private definitions (used only by Ceres)
but required additional work for public definitions to ensure they
were correctly propagated to clients via CMake using
target_compile_definitions() (>= 2.8.11) or add_definitions().
- A drawback to these approaches is that they did not work for chained
dependencies on Ceres, as in if in the users project B <- A <- Ceres,
then although the required Ceres public compile definitions would
be used when compiling A, they would not be propagated to B.
- This patch replaces the addition of compile definitions via
add_definitions() with an autogenerated config.h header which
is installed with Ceres and defines all of the enabled Ceres compile
options.
- This removes the need for the user to propagate any compile
definitions in their projects, and additionally allows post-install
inspect of the options with which Ceres was compiled.
Change-Id: Idbdb6abdad0eb31e7540370e301afe87a07f2260
commit cbf955474acf8f275b272da6ff5acd3a629cc806
Author: Björn Piltz <bjornpiltz@gmail.com>
Date: Wed May 7 17:10:15 2014 +0200
Fixes swapped verboselevel and condition.
Change-Id: I296d86e6bbf415be4bfd19d6a0fe0963e3d36d74
commit 3209b045744ea31f38d74bd9e9c8f88e605e7f76
Author: Björn Piltz <bjornpiltz@gmail.com>
Date: Wed May 7 17:02:27 2014 +0200
Fixed warning : 'va_copy' : macro redefinition
MSVC 2013 has got va_copy
Compare
http://msdn.microsoft.com/en-us/library/kb57fad8(v=vs.110).aspx
and
http://msdn.microsoft.com/en-us/library/kb57fad8.aspx.
Change-Id: If0937c76e8d250cde4b343844f3d35c980bf0921
commit 1df2f0f5d704f0cc458cf707e2602d495979e3c6
Author: Björn Piltz <bjornpiltz@gmail.com>
Date: Wed May 7 11:10:30 2014 +0200
Removed MSVC warnings
These are warnings which show up when using Ceres.
Change-Id: Id1f382f46b8a60743f0b12535b5b3cdf46f988e0
commit eca7e1c635581834c858794e09c1e876323b7775
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Dec 20 15:22:26 2013 -0800
Date: Tue May 6 10:16:19 2014 -0700
CompressedRowSparseMatrix::AppendRows and DeleteRows bugfix.
Remove BlockRandomAccessCRSMatrix.
CompressedRowSparseMatrix can store the row and column block structure
but the AppendRows and DeleteRows methods did not pay attention to them.
This meant that it was possible to get to a CompressedRowSparseMatrix
whose block structure did not match the contents of the matrix.
It is not used anywhere.
This change fixes this problem.
Change-Id: I1b3c807fc03d8c049ee20511e2bc62806d211b81
Change-Id: I2a8ebbdacf788582f21266825ead3f76646da29e
commit 27bb4a8589c47a65b5ea2c01872a903043d0ef74
commit 7088a08f5d9e04e75a5a4c3823ef7927e13ff0e4
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Dec 18 13:06:58 2013 -0800
Date: Mon May 5 09:02:05 2014 -0700
Handle empty problems consistently.
Fix some 80col violations and reflow the comments in cmake.in file.
Until now Ceres was inconsistent in the way it handled a solve
call on an "empty" Problem. If the problem did not contain
any residual or parameter blocks, it failed. However, if after
constructing the reduced program, the problem was found to not
contain any modifiable parameter blocks, it was considered a valid
problem which had a constant objective function value.
When creating problems automatically, it is often the case that
an empty problem is a corner case. This change makes handling this
corner case consistent with the rest of Ceres logic.
Change-Id: Ia9da09fbf5d5cd7eae6b39a92c1976b8645db9fe
Change-Id: I4c65c89b794845aeef69159a03350c727e2ee812
commit dcee120bac04911bf01d8365bddca87c74ce2af9
commit 95cce0834d5a2d72568e6d2be968a51c244c2787
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sat Dec 7 21:48:56 2013 -0800
Date: Mon May 5 08:54:50 2014 -0700
Consolidate SolverTerminationType enum.
Remove some errant tabs.
1. Rename SolverTerminationType to TerminationType.
2. Consolidate the enum as
a. CONVERGENCE - subsumes FUNCTION_TOLERANCE, PARAMETER_TOLERANCE and GRADIENT_TOLERANCE
b. NO_CONVERGENCE
c. FAILURE - captures all kinds of failures including DID_NOT_RUN.
d. USER_SUCCESS
e. USER_FAILURE
3. Solver::Summary::error is renamed to be Solver::Summary::message, to both
reduce confusion as well as capture its true meaning.
Change-Id: I27a382e66e67f5a4750d0ee914d941f6b53c326d
Change-Id: Ie1f7051e99bcb15ad068711b68a9d8f317b12ed7
commit d1cf320bb4f032cb14b20114a29ce2d867307492
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Nov 28 23:11:11 2013 +0600
Made collections port compatible with MSVC2008
The issue was caused by the fact that in this version
of MSVC unordered_map class is defined in <unordered_map>
header file, but this file declares the class in std::tr1
namespace.
This confused existing assumption that if there's an
existing <unordered_map> file then class is declared
in std namespace.
Added an extra check to CMake which detects whether
it's std or std::tr1 which actually contains class
of unordered_map.
Change-Id: Ic5cf41913895a6ce8e791cc7602d7cf5492c34de
commit 324eccb5f6ce2a1a0061ec9f3c40778a029a2d97
commit a536ae76dfa2dbe2bc487900b98cf6c15276c649
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Dec 3 09:28:14 2013 -0800
Date: Sun May 4 21:18:42 2014 -0700
Restore the state of the Problem after a call to Evaluate.
Lazily initialize the bounds arrays in ParameterBlock.
Calling Problem::Evaluate mutates the state of the parameter blocks.
In particular, depending on the set and order of parameter blocks
passed to the evaluate call, it will change the internal indexing
used by the Program object used by ProblemImpl. This needs to be
undone before Evaluate returns, otherwise the Problem object
is in an invalid state.
Problems that do not use bounds do not have to pay the
price of storing bounds constraints.
To help with testing and debugging in the future, a new method
Program::IsValid has been added which checks whether the problem
has its parameter and residual blocks in the right state.
Also replace the raw pointer access to the upper and
lower bounds arrays with accessors which hides the
lazy initialization from the user.
Thanks to Stefan Leutenegger for reporting this.
Change-Id: I209b486a31433f0cbb58b570047649eca6d42b56
Change-Id: I0325a35de9c29f853559f891e32e7c777686e537
commit 3b1ad31a1fe89fe0bd78e1fffdf22d47d43faaf5
commit 633b50b7af9841607c07133f585e131fba7de177
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Dec 2 15:43:20 2013 -0800
Date: Fri May 2 22:46:20 2014 -0700
Fix build breakage on old versions of SuiteSparse.
Add the (2,4,8) template specialization.
Change-Id: I2a061615fc374abef2ed323c298359002a6fc5f1
Change-Id: I058bcebdd1725031d573404133b184d6f27dc005
commit 5fd480692b0a0c87e2af2f5a8754042a14f5f089
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Dec 2 12:16:53 2013 -0800
commit 5ffe06019a6c741ee7edc940ffeeceaaeabfa05d
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Thu May 1 12:06:46 2014 +0100
Add more documentation to the linear solver enums.
Export Ceres compile definitions to targets compiled against Ceres.
Change-Id: Id57f76f73fa38043c0b6729972b1de8578ad7ede
- Previously all Ceres compile definitions were private to Ceres, that
is they were not exported to users via the CMake export mechanism.
- Now that we use compile definitions in public (installed) Ceres
headers, we need to export the Ceres compile definitions.
- If we did not do this, then the client's code 'see's' a different
version of the Ceres headers to those which were in fact compiled,
or in the case of shared_ptr, may not find the required header.
- This patch makes use of the new, in CMake 2.8.11, function:
target_compile_definitions() to export all of the Ceres compile
definitions using CMake's export functionality.
- For CMake versions < 2.8.11, we have to use the blunter instrument of
calling add_definitions() in CeresConfig.cmake (invoked by a call to
find_package(Ceres)). This is messy because it ends up adding the
Ceres compile definitions to any target declared in the user's code
after the call to find_package(Ceres). Although this should do no
harm as all of our defines are prefaced with CERES_, so any
unintentional name clashes are unlikely.
Change-Id: I5dea80949190eaf4fb08ea4ac568ce28c32dd4e0
commit d73acd035359886dfa1c5762b01c6f6449edcab8
commit 0e811b0881f1f21df0ae04fd745ae4ba5189cac1
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Dec 2 12:02:03 2013 -0800
Date: Thu May 1 07:54:12 2014 -0700
Fix a bug in Minimizer::RunCallbacks.
Solver::Summary::message was not being updated when the solver
terminated because of a user's iteration callback indicating
success or failure.
Thanks to Sergey Sharybin for reporting this.
Change-Id: I27e6e5eed086920ddf765461b0159417ac79d7b3
commit 31b503792611d2119bb1acb3528fc8d58c5bd029
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Apr 30 15:02:38 2014 -0700
Lint cleanup from William Rucklidge.
Change-Id: I8abcfd369f41b895ce746a21a35f250fe05c39d1
Change-Id: If545f114c1a2b07edd660a3c71ecfc16ffa25e43
commit 3faac6a28cec4c99c41421d3f585f3786be443b3
commit 15c1210a8bdf3e936b4ef600d75f0fbb70878fb5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Nov 28 07:13:26 2013 -0800
More lint cleanups and breakage fixes.
The previous CL was a premature submit due to lack of coffee.
Change-Id: Id425d0ef332f569a954f0413e6b1ae6087f40f30
commit ed92366592a951041bd0367c24006101ef7b6286
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Nov 28 06:50:43 2013 -0800
Lint cleanup from William Rucklidge.
Change-Id: I745810f5496a1b93263b20ff140f8883da61995e
commit 34b6359f39884683f2bbf06c93040afd42ae135d
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Nov 28 18:51:34 2013 +0600
Fix compilation error after recent enum rename in 33e01b9
Change-Id: I920aa4754df6b013e86f0e77c61338d7a80e7f45
commit 33e01b9c5e1416fe29c55ac0332cdca21c053c83
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 27 10:24:03 2013 -0800
Rename LinearSolverTerminationType enums.
This increases clarity, drops redundant enums and makes things
cleaner all around.
Change-Id: I761f195ddf17ea6bd8e4e55bf5a72863660c4c3b
commit 068437eb89d495d905465544ccd442efef457b04
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 27 07:05:57 2013 -0800
Pipe minimizer termination messages to Solver::Summary.
All minimizer termination messages are now available as
Solver::Summary::error.
This is part of the ongoing refactoring or
Change-Id: I4514c3c042645bbd1471bcde9bd3dbf81d9ee8b0
commit 89a592f410fb6f80c03dea84b6b9f1a10bea36c1
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Nov 26 11:35:49 2013 -0800
LinearSolver::Summary::status -> LinearSolver::Summary::message.
And a bunch of minor lint cleanups as they showed up.
Change-Id: I430a6b05710923c72daf6a5df4dfcd16fbf44b3a
commit b16e118b96c55451c0d8556f3c5b52ad36b69cac
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Nov 25 05:47:43 2013 -0800
Better error checking and reporting for linear solvers.
A lot of error checking cruft has accumulated over the years
in the various linear solvers. This change makes the error reporting
more robust and consistent across the various solvers.
Preconditioners are not covered by this change and will be the
subject of a future change.
Change-Id: Ibeb2572a1e67758953dde8d12e3abc6d1df9052d
commit 5794d41be2d8d6a67dcdfe607e66050f0ac04c55
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Nov 25 13:37:02 2013 -0800
Remove overzealous checks in Summary::FullReport.
Thanks to sebi.koch@gmail.com for reporting this.
Change-Id: I1ba9b375e5cf66639e292ba37b34a90446f13162
commit 40ef90304ac200bb948549e8e3748e487d27dc53
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Mon Nov 25 16:36:40 2013 +0000
Adding VLOG output to line search.
- Previously line search was sparse in terms of debug orientated VLOG
output which made debugging failure cases difficult.
Change-Id: Idfabf74d2b3f7b8256f79dff8c6b7fcdc2fcf4d3
commit 1284a5141426597f3ca1e29ae8548c9b4c43c9c1
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun Nov 24 15:09:43 2013 -0800
Use explicit formula to solve quadratic polynomials.
polynomial.cc implements a companion matrix base method for solving
polynomials. This is both expensive and numerically sensitive.
This change adds a quadratic equation solver. Instead of using the
usual quadratic formula, it uses the formula suggested by BKP Horn
for improved numerical stability.
Change-Id: I476933ce010d81db992f1c580d2fb23a4457eb3e
commit a9334d67d7973c0f56e65f12ae897dd53504ef0d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 20 10:12:23 2013 -0800
Fix constant parameter handling in inner iterations.
There was a bug in the way RemoveFixedBlocksFromProgram was working.
It only removed the constant parameter blocks from the
linear_solver_ordering, it was not even aware of the
inner_iteration_ordering.
This change fixes this bug. The code for RemoveFixedBlocksFromProgram
is also cleaned up and made more readable and the test have been updated.
Thanks to Mikael Persson for reporting this.
Change-Id: I454fa89f9b6f4f6320b02d5235e6f322cc15ff51
commit 331ff090dcae7096cea50144047b71cab2d3e819
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Mon Nov 25 13:44:53 2013 +0000
Downgrading log status of BFGS secant condition messages.
- These messages were originally VLOG(2) and were mistakenly upgraded to
WARNINGs when the tolerances were reduced.
Change-Id: I89dee666a09bc82cfa89b793dc0907268662f95e
commit 9697a08a2bf29531671526b49df73bfbc0d7d237
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sat Nov 23 10:03:37 2013 +0000
Defining CERES_FOUND in addition to Ceres_FOUND in CeresConfig.
- Previously we relied on FindPackage() to define Ceres_FOUND when
find_package(Ceres) was called.
- This is fine, but users might legitimately expect the variable to be
CERES_FOUND given the form of CERES_INCLUDE_DIRS/LIBRARIES.
- As there is an inconsistency in the CMake recommended names when
FindPackage() is called in Module vs Config form, we now explicitly
define both.
Change-Id: I54bce9aa112b684d26b60a9ae4d11eb7925a6ee5
commit 66e15b41d80b155f333f099a0278d50312cdaa15
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Nov 22 07:59:23 2013 -0800
Date: Tue Apr 29 08:12:19 2014 -0700
Lint cleanup from Jim Roseborough.
Change-Id: I6ddbf5c3d66595d27f7967a309768e5f5dd7e1fd
Change-Id: I53f4e0d020602443b397387b8c5908f25649403d
commit 79bde35f29291cf464b59f3dc2dd9f1fa88776a9
commit b1668067f1c97520d5d28eecf2c11d2afc1b01b3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Nov 21 21:33:51 2013 -0800
Date: Tue Apr 29 08:12:19 2014 -0700
SuiteSparse errors do not cause a fatal crash.
Variety of changes to documentation and example code.
1. Move LinearSolverTerminationType to ceres::internal.
2. Add FATAL_ERROR as a new enum to LinearSolverTerminationType.
3. Pipe SuiteSparse errors via a LinearSolverTerminationType so
to distinguish between fatal and non-fatal errors.
4. Update levenberg marquardt and dogleg strategies to deal
with FATAL_ERROR.
5. Update trust_region_minimizer to terminate when FATAL_ERROR
is encountered.
6. Remove SuiteSparse::SolveCholesky as it screws up the error
handling.
7. Fix all clients calling SuiteSparse to handle the result of
SuiteSparse::Cholesky correctly.
8. Remove fatal failures in SuiteSparse when symbolic factorization
fails.
9. Fix all clients of SuiteSparse to deal with null symbolic factors.
1. Update version history.
2. Minor changes to the tutorial to reflect the bounds constrained
problem.
3. Added static factory methods to the SnavelyReprojectionError.
4. Removed relative gradient tolerance from types.h as it is
not true anymore.
This is a temporary fix to deal with some production problems. A more
extensive cleanup and testing regime will be put in place in a
subsequent CL.
Change-Id: I1f60d539799dd95db7ecc340911e261fa4824f92
Change-Id: I8de386e5278a008c84ef2d3290d2c4351417a9f1
commit a674e0f8534ea6948f70a72fe9718e07b3d039ff
commit 658407dacc351a999206980fbb3265099e50e7a3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Nov 21 22:12:15 2013 -0800
Date: Mon Apr 28 13:25:17 2014 -0700
Fix corrector_test.cc.
Add missing template specializations to the NDK build.
Fix two death tests dealing with the sign of the gradient.
Change-Id: Ic91d54a64cc509307c94fce6d1fca083078936e2
Change-Id: I42bb6c3bd47648050298472af80333aa900e79bf
commit a8006af3110e98d64fb369e958fc00ec88d771a3
commit 5d7eed87b47871bc882af765188fa4fbca976855
Author: Björn Piltz <bjornpiltz@gmail.com>
Date: Wed Apr 23 22:13:37 2014 +0200
Support for MSVC DLLs.
Change-Id: Ibbcc4ba4e59f5bbf1cb91fe81c7d3b9042d03493
commit c830820a5c2be0d0cecb0822f2cff8b4ffe88f36
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Apr 28 10:28:24 2014 -0700
Add missing files to Android.mk
Change-Id: Ibdf577c592bcde0fe5c2ce343ed8e9028b82af8f
commit ceb7a3beaad140762b499f9a306fd7230715941a
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Apr 28 13:50:09 2014 +0600
Fix compilation error when using G++ compiler
This compiler defines shared_ptr in std::tr1 namespace, but
for this <tr1/memory> is to be included. Further, this compiler
also does have <memory> header which confused previous shared
pointer check.
Simplified logic around defines now, so currently we've got:
- CERES_TR1_MEMORY_HEADER defined if <tr1/memory> is to be
used for shared_ptr, otherwise <memory> is to be used.
- CERES_TR1_SHARED_PTR defined if shared_ptr is defined in
std::tr1 namespace, otherwise it's defined in std namespace.
All the shared_ptr checks are now moved to own file FindSharedPtr
which simplifies main CMakeLists.
Change-Id: I558a74793baaa0bd088801910a356be4ef17c31b
commit 02db9414fb6739857a37e268500083a0546cd0a3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Apr 28 08:32:51 2014 -0700
Fix the documentation for RandNormal.
As pointed out by Jim Roseborough, this is the Marsaglia Polar
method and not the Box-Muller method.
Change-Id: Id5332bcd4b4c23a3885cc296729b44eaa5edd0a8
commit 32530788d08c53f8d2c8a5f9bd61aa60a23d6e03
Author: Richard Stebbing <richie.stebbing@gmail.com>
Date: Sat Apr 26 07:42:23 2014 +0100
Add dynamic_sparsity option.
The standard sparse normal Cholesky solver assumes a fixed
sparsity pattern which is useful for a large number of problems
presented to Ceres. However, some problems are symbolically dense
but numerically sparse i.e. each residual is a function of a
large number of parameters but at any given state the residual
only depends on a sparse subset of them. For these class of
problems it is faster to re-analyse the sparsity pattern of the
jacobian at each iteration of the non-linear optimisation instead
of including all of the zero entries in the step computation.
The proposed solution adds the dynamic_sparsity option which can
be used with SPARSE_NORMAL_CHOLESKY. A
DynamicCompressedRowSparseMatrix type (which extends
CompressedRowSparseMatrix) has been introduced which allows
dynamic addition and removal of elements. A Finalize method is
provided which then consolidates the matrix so that it can be
used in place of a regular CompressedRowSparseMatrix. An
associated jacobian writer has also been provided.
Changes that were required to make this extension were adding the
SetMaxNumNonZeros method to CompressedRowSparseMatrix and adding
a JacobianFinalizer template parameter to the ProgramEvaluator.
Change-Id: Ia5a8a9523fdae8d5b027bc35e70b4611ec2a8d01
commit 2569076ff0bf8ffb3938da8b5df7edc4883aa053
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Apr 25 23:54:48 2014 -0700
More NDK fixes.
Fix variable names in port.h and fix fpclassify when
using gnustl. This was tested by switching to gnustl
in the JNI build.
Thanks to Carlos Hernandez for suggesting the gnustl fixes.
Change-Id: I690b73caf495ccc79061f45288e416da1604cc72
commit e55596f8860a09b12b5e1f949237f15357c1ac59
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Apr 25 16:17:19 2014 -0700
Change the defaults for shared_ptr.
By default shared_ptr is now assumed to be
in the standard <memory> header and in the
std namespace.
Previously the way the ifdefs were structured if the appropriate
variable was not defined, it would default to <t1/memory>.
The new defaults are more future proof.
Change-Id: If457806191196be2b6425b8289ea7a3488a27445
commit bb05be341b8436f611e4b69954a529edcca5b577
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun Apr 13 14:22:19 2014 -0700
Solver::Options uses shared_ptr to handle ownership.
Solver::Options::linear_solver_ordering and
Solver::Options::inner_iteration_ordering
were bare pointers even though Solver::Options took ownership of these
objects.
This lead to buggy user code and the inability to copy Solver::Options
objects around.
With this change, these naked pointers have been replaced by a
shared_ptr object which will managed the lifetime of these objects. This
also leads to simplification of the lifetime handling of these objects
inside the solver.
The Android.mk and Application.mk files have also been updated
to use a newer NDK revision which ships with LLVM's libc++.
Change-Id: I25161fb3ddf737be0b3e5dfd8e7a0039b22548cd
commit 8e0991381ea3a2baddea017cd07b333f0c5de595
Author: Joydeep Biswas <joydeep.biswas@gmail.com>
Date: Tue Apr 22 10:40:47 2014 -0400
Added a simplified robotics example for DynamicAutoDiffCostFunction.
Change-Id: I9520e0a9a8d9743285c5114523fbafa6ffa5b0bd
commit cc9d3bba1008066e51502cabd956985c6bdedfe8
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Apr 18 22:58:09 2014 -0700
Remove a comment from conf.py
Change-Id: I675f7e8fc5dd2143eab74901bc7241e02e37285f
commit c4cd29dd7c80ade5b3ac7a1f6ee7df22c8869ab5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Apr 16 23:40:12 2014 -0700
Merge landing page with introduction.
The existing introduction was a bit redundant and also
was not really an introduction. Also updated the build
instructions to reflect the new reality on Mac OSX.
Also updated the beginning of the tutorial to be a bit
gentler and updated the history to be more consistent
Change-Id: Ife38c1949252cf9f4c6301856957f2d38365f313
commit 46ccfb376ac52ac159f9187e0f7384ef68c1cbdd
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sat Apr 12 21:56:51 2014 -0700
Cleanup block_structure.h/cc
1. Remove obsolete Proto conversion functions.
2. Fix a strict weak ordering bug.
Change-Id: I1ce6d4b06e29cf475df1d5bd37c79f66f20f8d93
commit 7d489fdb073937ac05c0693c1902fbcb9eeb7dfc
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Apr 7 11:14:51 2014 -0700
Refactor the landing page to be a bit more compact.
Also minor changes to the introduction.
Change-Id: Iaa71f576b95c869f075d6837dbb60ba4bb608ee7
commit 406ac7816730c15425db20d994ac0d60d932ab6c
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Apr 7 08:36:07 2014 +0000
Rework Ceres documentation as new website
This reworks the Ceres Sphinx documentation such that it can
function as the main Ceres website, now hosted at
ceres-solver.org. This also changes to the theme sphinx_rtd_theme
used by Read The Docs; this theme has strong mobile support and is
well enough designed.
Change-Id: I63232d985859a6dac94ff58f08bf81eb2b9e7f99
commit 3e60a998ac970da659d590bac2ff892ee619aa1b
Author: Richard Bowen <rsbowen@google.com>
Date: Tue Apr 1 16:22:49 2014 -0700
Added support and tests: row and column blocks for sparse matrix
transpose.
Change-Id: Ife641b08a9e86826478521a405f21ba60667f0e8
commit 5ecb1c3f1dfde6e8ed4b493eafef7b43dad19e72
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Apr 1 09:20:35 2014 -0700
Add Problem::IsParameterBlockPresent.
This allows the user to query the Problem to see if a
parameter block is already present or not.
Change-Id: If786f6c008cc644f3398597901d718d12a6d865d
commit 75e2232b29ff2ea42c8406c9d45b138a7e7a0048
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Mar 28 11:21:02 2014 -0700
Fix spacing in building.rst
Change-Id: I4c68d732c80d7ff2bdbc812bf0b7c7fb98c43957
commit b555b489b8447434294a8a6676272289140d6a1d
Author: Richard Bowen <rsbowen@google.com>
Date: Thu Mar 27 15:51:28 2014 -0700
Changes documentation to reflect changes in output format.
Change-Id: Ic0ba234283e791edcad29aec067905dcb2130813
commit 1cfb600bfc3be8342f85f155b2b219a595ee58da
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Mar 26 12:55:11 2014 -0700
Add the (2,4,9) template specialization for PartitionedMatrixView
and SchurEliminator.
Also update the comment inside generate_partitioned_matrix_view_specializations.py
Change-Id: I99a7ab4256091b1da48553da3076e5996a5757ed
commit 195e49351b386ffc23020d406883eaa6511e29b3
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Wed Nov 20 19:56:06 2013 +0000
Date: Wed Mar 26 11:36:11 2014 +0000
Adding threads libraries to exported dependencies if using OpenMP.
Check validity of residual block before removal in RemoveResidualBlock.
- Previously we were only adding the flags to the link flags for the
Ceres project, which resulted in them not being exported. Thus
projects importing Ceres (if using OpenMP) would have to manually
specify them in addition to CERES_LIBRARIES.
- Breaking change: Problem::Options::enable_fast_parameter_block_removal
is now Problem::Options::enable_fast_removal, as it now controls
the behaviour for both parameter and residual blocks.
Change-Id: If0354cc07e84dbebfc870a8862e1a8ca64659791
- Previously we did not check that the specified residual block to
remove in RemoveResidualBlock actually represented a valid residual
for the problem.
- This meant that Ceres would die unexpectedly if the user passed an
uninitialised residual_block, or more likely attempted to remove a
residual block that had already been removed automatically after
the user removed a parameter block upon on which it was dependent.
- RemoveResidualBlock now verifies the validity of the given
residual_block to remove. Either by checking against a hash set of
all residuals maintained in ProblemImpl iff enable_fast_removal
is enabled. Or by a full scan of the residual blocks if not.
Change-Id: I9ab178e2f68a74135f0a8e20905b16405c77a62b
commit 6c0d96424e2c27326757936a3738f9efc37c6c24
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 20 11:52:01 2013 -0800
Minor documentation fix.
Thanks to Satya Mallick.
Change-Id: I556f1c141bf16739d54450351b0f29fd4ea40014
commit 7747bb0e6b0e54366933ed75c1bcafe6a1109c3d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 20 11:29:22 2013 -0800
Minor corrections to the documentation.
Thanks to Satya Mallick for reporting these.
Change-Id: Ia52e08a7e21d5247dc475cfbf10bf57265aa118f
commit 3fca2c4b2fae9abcaa9611f2bd3885ce6b11963b
commit 74762b60332d4a1c08ec5aef75ec718da9d305a2
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Mon Nov 18 10:26:49 2013 +0000
Date: Thu Mar 20 14:50:25 2014 +0000
Decreasing update threshold for BFGS as per L-BFGS.
Allow construction of an AutoDiffLocalParameterization with a functor.
- Improves performance of BFGS on NIST, as per L-BFGS.
- Adding explanation of origin and purpose of Secant condition
tolerance check for Hessian update in (L)BFGS.
- Previously AutoDiffLocalParameterization would internally instantiate
a functor instance whenever one was required. This prohibits the
user passing arguments to the constructor of the functor.
- Now AutoDiffLocalParameterization can take over ownership of an
allocated functor which the user created. This mimics the behaviour
of AutoDiffCostFunction.
Change-Id: If57b9957d31d8629c772c19a069e1e56e727b350
Change-Id: I264e1face44ca5d5e71cc20c77cc7654d3f74cc0
commit 54fcbf893852272ba2158d6a56572a2eb3ccc41f
commit 4f603fb0d82317a53fa9d96abe6a97b2e69bff36
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Nov 19 10:12:05 2013 -0800
Date: Wed Mar 19 17:16:43 2014 -0700
Relax the requirements on loss functiond derivatives.
Grammar fixes from William Rucklidge.
We now require that the first derivative of the loss function
be positive only if the second derivative is non-zero. This is
because when the second derivative is non-positive, we do not use
the second order correction suggested by BANS and instead use
a simpler first order strategy which does not use a division by
the gradient of the loss function.
Change-Id: I3d65713f152611998e196ff389a7081acfdfd8c1
commit db98425b94c9eff9b125bf4a854545162e8c1aec
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Nov 15 14:14:09 2013 -0800
Small bugfix to logging.h from Scott Ettinger.
Change-Id: Ie6d51e7883adf36c6fc7a78ff95afab6a78e488b
commit 4d0e626b55f36ab8f44a4acc8157b85cfecd4673
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Fri Nov 15 13:53:44 2013 +0000
Fixing gflags HINTS variable names (adding missing “_DIR”).
- The HINTS variables for gflags were incorrectly used as
GFLAGS_[INCLUDE/LIBRARY]_HINTS when they should have been
GFLAGS_[INCLUDE/LIBRARY]_DIR_HINTS as per the docs.
- Also removing a completed TODO in the main CMakeLists.
- Updating method of extracting current directory in CeresConfig.cmake
to avoid use of CMAKE_CURRENT_LIST_DIR, which was not present in
CMake =< v2.8.3.
Change-Id: I42ae696e3b785febe48688d912f0f343e8947cb0
commit bf4c1b76e4926c738fc805e9ff4be0ed584d9eee
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Thu Nov 14 21:27:20 2013 +0000
Decreasing threshold at which L-BFGS Hessian is updated.
- Decreasing threshold at which L-BFGS Hessian is updated from 1e-10
to 1e-14 results in a very significant improvement in NIST scores
(43 -> 53 for CUBIC).
- Adding comment in FindPolynomialRoots() explaining why behaviour
is correct.
Change-Id: If668e087e7a86d29659aa74e8528b192b604c841
commit 7124c3474cd201134c3a3350b46aca468f1edafa
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Thu Nov 7 16:10:02 2013 +0000
Fixes for some line search bugs & corner cases.
- Increase precision of numeric values output in error messages to
allow for easier debugging.
- Ensure termination after Wolfe search bracketing phase if bracket
width has been shrunk to below tolerance.
- Cleaned up return value for BracketingPhase(), now false iff
optimisation should stop, true otherwise.
- Fix bug whereby we would mark a step size as satisfying the Wolfe
conditions when it did not due to numerical issues in the cost
function.
- Adding explanation of a subtlety in which a zoom could still be
acceptably invoked with bracket_low.f > bracket_high.f.
- Replacing hard check of a pre-condition of ZoomPhase() with a
conditional return if not satisfied to address issue whereby a
bracket could be incorrectly identified due to inconsistent values
& gradients returned from the cost function.
- Adding missing check for step size validity in line search minimizer.
- Adding ToDebugString() for FunctionSample.
Change-Id: Iad98e635749877f80c079ebad126bf022d82232d
commit 54fc9423673886ac9ed3fe329a80f07544aeea70
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Thu Nov 14 11:42:00 2013 +0000
Removing incorrect specialisation of install dirs on Windows.
- Previously on Windows the leaf include & lib install directories
passed to CeresConfig.cmake.in when configured where capitalised on
Windows.
- This capitalisation was incorrect, as the actual paths used are
specified in the install() statements and are always in the standard
lower-case form.
- This likely did not cause any issues previously as although NTFS is
case sensitive, the Win32 API is not, and most applications access
files through the Win32 API, and are thus not case-sensitive.
Change-Id: I335b6e2d10a1c64f320c2a1a68eeda1b22344e73
commit fcbbb11e37386097b1427dc3aa89f264d6951ded
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Wed Nov 13 22:22:30 2013 +0000
Ensure build paths for dependencies are searched in FindPackage(Ceres)
- Append to hint locations used by FindPackage scripts for public
dependencies (glog & Eigen) the locations of the dependencies when
Ceres was built.
- This means that the user should not have to supply them again when
using find_package(Ceres) even if they are installed in a
non-standard location.
Change-Id: I9550de91025ba47f01f1ea3c3fefe80fe38d14ff
commit 7899e45d378f589a67ad8e042bf6a7cb7e15df00
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Wed Nov 13 21:08:27 2013 +0000
Fixing a documentation typo, DIRS -> DIR in HINTS variables.
Change-Id: I42b75a5e0b8a451c3a43ab29d0c14856e4b86ab8
commit 1a041c35b780e60c3b497eb096b72ad20f47960e
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Nov 12 14:17:52 2013 -0800
Update to 1.8.0.
Change-Id: Id42e594f03e3575d06e18c1ef66df64f43d86839
commit 36b26139296060511718b3ef0da03a52706db481
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Thu Nov 7 16:57:36 2013 +0000
Fix ordering of ParseCommandLineFlags() & InitGoogleTest() for Windows.
- On Windows gtest passes additional non-gflags command line flags
for death-tests, to avoid gflags invoking an error for these flags
InitGoogleTest() must be called before ParseCommandLineFlags() to
handle and remove them before gflags parses the remaining flags.
Change-Id: I0c705ecd3aa029b70a2589b592e6a2c192745c0e
commit 8c155d51fab099ee7bf64f4bdbfeda82881925a5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Nov 8 08:04:44 2013 -0800
Speed up the application of robust loss functions.
Since we added special handling for the case for rho[2] < 0,
the bulk of CorrectJacobian is pointless in the common case.
So add a simple one dimensional loop which rescales the Jacobian.
This speeds up this method immensely.
The robustification of a Jacobian gets speeded up by > 50%.
Change-Id: I97c4e897ccbb5521c053e1fb931c5d0d32f542c7
commit 58792dc8ee0e4b56331f33f753f1b1932c5c2960
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Nov 6 09:42:46 2013 -0800
Update to 1.8.0rc2.
Change-Id: Ifbf5312377bf1791a29aefd3edc3a765999c5824
commit af04d7f18740faf452e9171af530aa1bdead44bb
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Nov 5 13:47:30 2013 -0800
Remove DCHECK_GE checks from fixed_array.h
This triggers -Wtype-limits warnings on comparisons
which are always true, since the test being done is
n >= 0, where n is of type size_t, which is always
true.
This causes problems when compiling Ceres on linux
with miniglog.
Change-Id: Ia1d1d1483e03469c71fde029b62ca6d84e9b27e0
Change-Id: Ia40df7a1d141eb2552694510453d1431bb0c8dce

View File

@@ -29,29 +29,28 @@ defs.append('CERES_HAVE_RWLOCK')
if env['WITH_BF_OPENMP']:
defs.append('CERES_USE_OPENMP')
def define_unordered_map(conf):
found, namespace, include_prefix = test_unordered_map(conf)
if found:
if not include_prefix:
if namespace == 'std':
defs.append('CERES_STD_UNORDERED_MAP')
return True
elif namespace == 'std::tr1':
defs.append('CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE')
return True
else:
if namespace == 'std::tr1':
defs.append('CERES_TR1_UNORDERED_MAP')
return True
return False
conf = Configure(env)
if not define_unordered_map(conf):
if env['WITH_UNORDERED_MAP_SUPPORT']:
if env['UNORDERED_MAP_HEADER'] == 'unordered_map':
if env['UNORDERED_MAP_NAMESPACE'] == 'std':
defs.append('CERES_STD_UNORDERED_MAP')
elif env['UNORDERED_MAP_NAMESPACE'] == 'std::tr1':
defs.append('CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE')
elif env['UNORDERED_MAP_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_UNORDERED_MAP')
else:
print("-- Replacing unordered_map/set with map/set (warning: slower!)")
defs.append('CERES_NO_UNORDERED_MAP')
env = conf.Finish()
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
if not env['WITH_SHARED_PTR_SUPPORT']:
print("-- Unable to find shared_ptr which is required for compilation.")
exit(1)
if env['SHARED_PTR_HEADER'] == 'tr1/memory':
defs.append('CERES_TR1_MEMORY_HEADER')
if env['SHARED_PTR_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_SHARED_PTR')
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags ./config'
# work around broken hashtable in 10.5 SDK
if env['OURPLATFORM'] == 'darwin' and env['WITH_BF_BOOST']:

View File

@@ -123,6 +123,7 @@ set(INC
.
include
internal
config
../gflags
../../
)
@@ -230,29 +231,28 @@ defs.append('CERES_HAVE_RWLOCK')
if env['WITH_BF_OPENMP']:
defs.append('CERES_USE_OPENMP')
def define_unordered_map(conf):
found, namespace, include_prefix = test_unordered_map(conf)
if found:
if not include_prefix:
if namespace == 'std':
defs.append('CERES_STD_UNORDERED_MAP')
return True
elif namespace == 'std::tr1':
defs.append('CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE')
return True
else:
if namespace == 'std::tr1':
defs.append('CERES_TR1_UNORDERED_MAP')
return True
return False
conf = Configure(env)
if not define_unordered_map(conf):
if env['WITH_UNORDERED_MAP_SUPPORT']:
if env['UNORDERED_MAP_HEADER'] == 'unordered_map':
if env['UNORDERED_MAP_NAMESPACE'] == 'std':
defs.append('CERES_STD_UNORDERED_MAP')
elif env['UNORDERED_MAP_NAMESPACE'] == 'std::tr1':
defs.append('CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE')
elif env['UNORDERED_MAP_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_UNORDERED_MAP')
else:
print("-- Replacing unordered_map/set with map/set (warning: slower!)")
defs.append('CERES_NO_UNORDERED_MAP')
env = conf.Finish()
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
if not env['WITH_SHARED_PTR_SUPPORT']:
print("-- Unable to find shared_ptr which is required for compilation.")
exit(1)
if env['SHARED_PTR_HEADER'] == 'tr1/memory':
defs.append('CERES_TR1_MEMORY_HEADER')
if env['SHARED_PTR_NAMESPACE'] == 'std::tr1':
defs.append('CERES_TR1_SHARED_PTR')
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags ./config'
# work around broken hashtable in 10.5 SDK
if env['OURPLATFORM'] == 'darwin' and env['WITH_BF_BOOST']:

View File

@@ -0,0 +1,45 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: alexs.mac@gmail.com (Alex Stewart)
// Default (empty) configuration options for Ceres.
//
// IMPORTANT: Most users of Ceres will not use this file; when compiling Ceres
// with CMake, CMake will configure a new config.h with the currently
// selected Ceres compile options and copy it into the source
// directory before compilation. However, for some users of Ceres
// who compile without CMake, this file ensures that Ceres will
// compile, with the user either specifying manually the Ceres
// compile options, or passing them directly through the compiler.
#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_
#define CERES_PUBLIC_INTERNAL_CONFIG_H_
#endif // CERES_PUBLIC_INTERNAL_CONFIG_H_
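For builds that do not go through CMake (such as the SCons build this commit updates), the selected options have to be supplied by hand, either by editing a copy of this header or by passing the defines on the compiler command line. A sketch, assuming a toolchain whose shared_ptr and unordered_map live in std::tr1; the actual defines must match whatever the configure step detected:
    // Added to a hand-maintained config.h, or passed as -D flags to the compiler.
    #define CERES_TR1_MEMORY_HEADER    // shared_ptr comes from <tr1/memory>
    #define CERES_TR1_SHARED_PTR       // shared_ptr lives in std::tr1
    #define CERES_TR1_UNORDERED_MAP    // unordered_map comes from <tr1/unordered_map>
    #define CERES_NO_SUITESPARSE       // build without SuiteSparse support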

View File

@@ -43,8 +43,6 @@ internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_crs_matrix.cc
internal/ceres/block_random_access_crs_matrix.h
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.cc
@@ -64,6 +62,7 @@ internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.cc
internal/ceres/cgnr_solver.h
internal/ceres/CMakeLists.txt
internal/ceres/collections_port.h
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_col_sparse_matrix_utils.h
@@ -94,6 +93,11 @@ internal/ceres/detect_structure.cc
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.cc
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
@@ -109,6 +113,8 @@ internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -126,6 +132,8 @@ internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -231,3 +239,4 @@ internal/ceres/visibility.cc
internal/ceres/visibility.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h
config/ceres/internal/config.h

View File

@@ -107,11 +107,18 @@ namespace ceres {
template <typename Functor, int kGlobalSize, int kLocalSize>
class AutoDiffLocalParameterization : public LocalParameterization {
public:
AutoDiffLocalParameterization() :
functor_(new Functor()) {}
// Takes ownership of functor.
explicit AutoDiffLocalParameterization(Functor* functor) :
functor_(functor) {}
virtual ~AutoDiffLocalParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
return Functor()(x, delta, x_plus_delta);
return (*functor_)(x, delta, x_plus_delta);
}
virtual bool ComputeJacobian(const double* x, double* jacobian) const {
@@ -128,7 +135,7 @@ class AutoDiffLocalParameterization : public LocalParameterization {
const double* parameter_ptrs[2] = {x, zero_delta};
double* jacobian_ptrs[2] = { NULL, jacobian };
return internal::AutoDiff<Functor, double, kGlobalSize, kLocalSize>
::Differentiate(Functor(),
::Differentiate(*functor_,
parameter_ptrs,
kGlobalSize,
x_plus_delta,
@@ -137,6 +144,9 @@ class AutoDiffLocalParameterization : public LocalParameterization {
virtual int GlobalSize() const { return kGlobalSize; }
virtual int LocalSize() const { return kLocalSize; }
private:
internal::scoped_ptr<Functor> functor_;
};
} // namespace ceres
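The practical effect of the new functor-taking constructor is that the Plus() functor may now carry run-time state. A usage sketch (ScaledPlus is an invented example functor, not part of Ceres):
    // Hypothetical Plus() functor with per-instance state.
    struct ScaledPlus {
      explicit ScaledPlus(double scale) : scale(scale) {}
      template <typename T>
      bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
        for (int i = 0; i < 3; ++i) {
          x_plus_delta[i] = x[i] + T(scale) * delta[i];
        }
        return true;
      }
      double scale;
    };

    // The parameterization takes ownership of the functor instance, so the
    // state stored in it is available to Plus() and ComputeJacobian().
    ceres::LocalParameterization* parameterization =
        new ceres::AutoDiffLocalParameterization<ScaledPlus, 3, 3>(
            new ScaledPlus(2.0));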

View File

@@ -38,12 +38,14 @@
#ifndef CERES_PUBLIC_C_API_H_
#define CERES_PUBLIC_C_API_H_
#include "ceres/internal/port.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Init the Ceres private data. Must be called before anything else. */
void ceres_init();
CERES_EXPORT void ceres_init();
/* Equivalent to CostFunction::Evaluate() in the C++ API.
*
@@ -88,23 +90,23 @@ typedef void (*ceres_loss_function_t)(void* user_data,
*
* See loss_function.h for the details of each loss function.
*/
void* ceres_create_huber_loss_function_data(double a);
void* ceres_create_softl1_loss_function_data(double a);
void* ceres_create_cauchy_loss_function_data(double a);
void* ceres_create_arctan_loss_function_data(double a);
void* ceres_create_tolerant_loss_function_data(double a, double b);
CERES_EXPORT void* ceres_create_huber_loss_function_data(double a);
CERES_EXPORT void* ceres_create_softl1_loss_function_data(double a);
CERES_EXPORT void* ceres_create_cauchy_loss_function_data(double a);
CERES_EXPORT void* ceres_create_arctan_loss_function_data(double a);
CERES_EXPORT void* ceres_create_tolerant_loss_function_data(double a, double b);
/* Free the given stock loss function data. */
void ceres_free_stock_loss_function_data(void* loss_function_data);
CERES_EXPORT void ceres_free_stock_loss_function_data(void* loss_function_data);
/* This is an implementation of ceres_loss_function_t contained within Ceres
* itself, intended as a way to access the various stock Ceres loss functions
* from the C API. This should be passed to ceres_add_residual() below, in
* combination with a user_data pointer generated by
* ceres_create_stock_loss_function() above. */
void ceres_stock_loss_function(void* user_data,
double squared_norm,
double out[3]);
CERES_EXPORT void ceres_stock_loss_function(void* user_data,
double squared_norm,
double out[3]);
/* Equivalent to Problem from the C++ API. */
struct ceres_problem_s;
@@ -115,11 +117,11 @@ typedef struct ceres_residual_block_id_s ceres_residual_block_id_t;
/* Create and destroy a problem */
/* TODO(keir): Add options for the problem. */
ceres_problem_t* ceres_create_problem();
void ceres_free_problem(ceres_problem_t* problem);
CERES_EXPORT ceres_problem_t* ceres_create_problem();
CERES_EXPORT void ceres_free_problem(ceres_problem_t* problem);
/* Add a residual block. */
ceres_residual_block_id_t* ceres_problem_add_residual_block(
CERES_EXPORT ceres_residual_block_id_t* ceres_problem_add_residual_block(
ceres_problem_t* problem,
ceres_cost_function_t cost_function,
void* cost_function_data,
@@ -130,7 +132,7 @@ ceres_residual_block_id_t* ceres_problem_add_residual_block(
int* parameter_block_sizes,
double** parameters);
void ceres_solve(ceres_problem_t* problem);
CERES_EXPORT void ceres_solve(ceres_problem_t* problem);
/* TODO(keir): Figure out a way to pass a config in. */
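For reference, a minimal end-to-end use of these exported entry points might look as follows. This is a sketch: the residual callback follows the ceres_cost_function_t typedef from earlier in this header, and it is assumed here that returning a non-zero value signals success.
    #include <stddef.h>
    #include "ceres/c_api.h"

    /* Residual f(x) = 10 - x, with Jacobian df/dx = -1. */
    static int MyCostFunction(void* user_data,
                              double** parameters,
                              double* residuals,
                              double** jacobians) {
      (void)user_data;
      residuals[0] = 10.0 - parameters[0][0];
      if (jacobians != NULL && jacobians[0] != NULL) {
        jacobians[0][0] = -1.0;
      }
      return 1;  /* assumed: non-zero signals success */
    }

    int main(void) {
      double x = 0.5;
      double* parameter_pointers[] = { &x };
      int parameter_sizes[] = { 1 };

      ceres_init();
      ceres_problem_t* problem = ceres_create_problem();
      ceres_problem_add_residual_block(problem,
                                       MyCostFunction, NULL,  /* cost function + user data */
                                       NULL, NULL,            /* no loss function */
                                       1,                     /* num_residuals */
                                       1,                     /* num_parameter_blocks */
                                       parameter_sizes,
                                       parameter_pointers);
      ceres_solve(problem);
      ceres_free_problem(problem);
      return 0;
    }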

View File

@@ -34,8 +34,8 @@
#ifndef CERES_PUBLIC_CERES_H_
#define CERES_PUBLIC_CERES_H_
#define CERES_VERSION 1.8.0
#define CERES_ABI_VERSION 1.8.0
#define CERES_VERSION 1.9.0
#define CERES_ABI_VERSION 1.9.0
#include "ceres/autodiff_cost_function.h"
#include "ceres/autodiff_local_parameterization.h"

View File

@@ -70,7 +70,7 @@ namespace ceres {
// ccf_residual[i] = f_i(my_cost_function_residual[i])
//
// and the Jacobian will be affected appropriately.
class ConditionedCostFunction : public CostFunction {
class CERES_EXPORT ConditionedCostFunction : public CostFunction {
public:
// Builds a cost function based on a wrapped cost function, and a
// per-residual conditioner. Takes ownership of all of the wrapped cost

View File

@@ -60,7 +60,7 @@ namespace ceres {
// code inheriting from this class is expected to set these two members with the
// corresponding accessors. This information will be verified by the Problem
// when added with AddResidualBlock().
class CostFunction {
class CERES_EXPORT CostFunction {
public:
CostFunction() : num_residuals_(0) {}

View File

@@ -196,9 +196,9 @@ class CovarianceImpl;
// covariance.GetCovarianceBlock(y, y, covariance_yy)
// covariance.GetCovarianceBlock(x, y, covariance_xy)
//
class Covariance {
class CERES_EXPORT Covariance {
public:
struct Options {
struct CERES_EXPORT Options {
Options()
#ifndef CERES_NO_SUITESPARSE
: algorithm_type(SPARSE_QR),
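A compact sketch of the covariance API referenced in the comment above (x stands for a parameter block, assumed here to be a 3-vector, that has already been added to problem):
    ceres::Covariance::Options cov_options;
    ceres::Covariance covariance(cov_options);

    std::vector<std::pair<const double*, const double*> > covariance_blocks;
    covariance_blocks.push_back(std::make_pair(x, x));

    double covariance_xx[3 * 3];
    if (covariance.Compute(covariance_blocks, &problem)) {
      covariance.GetCovarianceBlock(x, x, covariance_xx);
    }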

View File

@@ -38,7 +38,7 @@ namespace ceres {
// A compressed row sparse matrix used primarily for communicating the
// Jacobian matrix to the user.
struct CRSMatrix {
struct CERES_EXPORT CRSMatrix {
CRSMatrix() : num_rows(0), num_cols(0) {}
int num_rows;

View File

@@ -46,25 +46,24 @@
namespace ceres {
#if defined(_MSC_VER)
inline bool IsFinite (double x) { return _finite(x); }
inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
inline bool IsNaN (double x) { return _isnan(x); }
inline bool IsFinite (double x) { return _finite(x) != 0; }
inline bool IsInfinite(double x) { return _finite(x) == 0 && _isnan(x) == 0; }
inline bool IsNaN (double x) { return _isnan(x) != 0; }
inline bool IsNormal (double x) {
int classification = _fpclass(x);
return classification == _FPCLASS_NN ||
classification == _FPCLASS_PN;
}
#elif defined(ANDROID)
// On Android when using the GNU STL, the C++ fpclassify functions are not
// available. Strictly speaking, the std functions are not standard until
// C++11. Instead use the C99 macros on Android.
#elif defined(ANDROID) && defined(_STLPORT_VERSION)
// On Android, when using the STLPort, the C++ isnan and isnormal functions
// are defined as macros.
inline bool IsNaN (double x) { return isnan(x); }
inline bool IsNormal (double x) { return isnormal(x); }
// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
// not available, so reimplement them.
# if defined(_STLPORT_VERSION)
inline bool IsInfinite(double x) {
return x == std::numeric_limits<double>::infinity() ||
x == -std::numeric_limits<double>::infinity();
@@ -72,17 +71,15 @@ inline bool IsInfinite(double x) {
inline bool IsFinite(double x) {
return !isnan(x) && !IsInfinite(x);
}
# else
inline bool IsFinite (double x) { return isfinite(x); }
inline bool IsInfinite(double x) { return isinf(x); }
# endif // defined(_STLPORT_VERSION)
#else
# else
// These definitions are for the normal Unix suspects.
// TODO(keir): Test the "else" with more platforms.
inline bool IsFinite (double x) { return std::isfinite(x); }
inline bool IsInfinite(double x) { return std::isinf(x); }
inline bool IsNaN (double x) { return std::isnan(x); }
inline bool IsNormal (double x) { return std::isnormal(x); }
#endif
} // namespace ceres

View File

@@ -31,8 +31,19 @@
#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
#define CERES_PUBLIC_INTERNAL_PORT_H_
// This file needs to compile as c code.
#ifdef __cplusplus
#include <string>
#include "ceres/internal/config.h"
#if defined(CERES_TR1_MEMORY_HEADER)
#include <tr1/memory>
#else
#include <memory>
#endif
namespace ceres {
// It is unfortunate that this import of the entire standard namespace is
@@ -45,6 +56,33 @@ using namespace std;
// "string" implementation in the global namespace.
using std::string;
#if defined(CERES_TR1_SHARED_PTR)
using std::tr1::shared_ptr;
#else
using std::shared_ptr;
#endif
} // namespace ceres
#endif // __cplusplus
// A macro to signal which functions and classes are exported when
// building a DLL with MSVC.
//
// Note that the ordering here is important: CERES_BUILDING_SHARED_LIBRARY
// is only defined locally when Ceres is compiled; it is never exported to
// users. However, in order that we do not have to configure config.h
// separately for building vs installing, if we are using MSVC and building
// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
// happens first.
#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
# define CERES_EXPORT __declspec(dllexport)
#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
# define CERES_EXPORT __declspec(dllimport)
#else
# define CERES_EXPORT
#endif
#endif // CERES_PUBLIC_INTERNAL_PORT_H_

View File

@@ -41,7 +41,7 @@ namespace ceres {
// This struct describes the state of the optimizer after each
// iteration of the minimization.
struct IterationSummary {
struct CERES_EXPORT IterationSummary {
IterationSummary()
: iteration(0),
step_is_valid(false),
@@ -211,7 +211,7 @@ struct IterationSummary {
// const bool log_to_stdout_;
// };
//
class IterationCallback {
class CERES_EXPORT IterationCallback {
public:
virtual ~IterationCallback() {}
virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
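A minimal callback against this interface could look like the following sketch (LoggingCallback is an invented name; it would be registered through Solver::Options::callbacks before calling Solve()):
    #include <cstdio>

    class LoggingCallback : public ceres::IterationCallback {
     public:
      virtual ceres::CallbackReturnType operator()(
          const ceres::IterationSummary& summary) {
        std::printf("iteration %4d  cost %e\n", summary.iteration, summary.cost);
        return ceres::SOLVER_CONTINUE;
      }
    };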

View File

@@ -649,6 +649,8 @@ struct NumTraits<ceres::Jet<T, N> > {
return ceres::Jet<T, N>(1e-12);
}
static inline Real epsilon() { return Real(std::numeric_limits<T>::epsilon()); }
enum {
IsComplex = 0,
IsInteger = 0,

View File

@@ -107,7 +107,7 @@ namespace ceres {
//
// The class LocalParameterization defines the function Plus and its
// Jacobian which is needed to compute the Jacobian of f w.r.t delta.
class LocalParameterization {
class CERES_EXPORT LocalParameterization {
public:
virtual ~LocalParameterization() {}
@@ -133,7 +133,7 @@ class LocalParameterization {
// Some basic parameterizations
// Identity Parameterization: Plus(x, delta) = x + delta
class IdentityParameterization : public LocalParameterization {
class CERES_EXPORT IdentityParameterization : public LocalParameterization {
public:
explicit IdentityParameterization(int size);
virtual ~IdentityParameterization() {}
@@ -150,7 +150,7 @@ class IdentityParameterization : public LocalParameterization {
};
// Hold a subset of the parameters inside a parameter block constant.
class SubsetParameterization : public LocalParameterization {
class CERES_EXPORT SubsetParameterization : public LocalParameterization {
public:
explicit SubsetParameterization(int size,
const vector<int>& constant_parameters);
@@ -160,7 +160,9 @@ class SubsetParameterization : public LocalParameterization {
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return constancy_mask_.size(); }
virtual int GlobalSize() const {
return static_cast<int>(constancy_mask_.size());
}
virtual int LocalSize() const { return local_size_; }
private:
@@ -172,7 +174,7 @@ class SubsetParameterization : public LocalParameterization {
// with * being the quaternion multiplication operator. Here we assume
// that the first element of the quaternion vector is the real (cos
// theta) part.
class QuaternionParameterization : public LocalParameterization {
class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
public:
virtual ~QuaternionParameterization() {}
virtual bool Plus(const double* x,

View File

@@ -82,7 +82,7 @@
namespace ceres {
class LossFunction {
class CERES_EXPORT LossFunction {
public:
virtual ~LossFunction() {}
@@ -128,7 +128,7 @@ class LossFunction {
// It is not normally necessary to use this, as passing NULL for the
// loss function when building the problem accomplishes the same
// thing.
class TrivialLoss : public LossFunction {
class CERES_EXPORT TrivialLoss : public LossFunction {
public:
virtual void Evaluate(double, double*) const;
};
@@ -171,7 +171,7 @@ class TrivialLoss : public LossFunction {
//
// The scaling parameter 'a' corresponds to 'delta' on this page:
// http://en.wikipedia.org/wiki/Huber_Loss_Function
class HuberLoss : public LossFunction {
class CERES_EXPORT HuberLoss : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) { }
virtual void Evaluate(double, double*) const;
@@ -187,7 +187,7 @@ class HuberLoss : public LossFunction {
// rho(s) = 2 (sqrt(1 + s) - 1).
//
// At s = 0: rho = [0, 1, -1/2].
class SoftLOneLoss : public LossFunction {
class CERES_EXPORT SoftLOneLoss : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
@@ -204,7 +204,7 @@ class SoftLOneLoss : public LossFunction {
// rho(s) = log(1 + s).
//
// At s = 0: rho = [0, 1, -1].
class CauchyLoss : public LossFunction {
class CERES_EXPORT CauchyLoss : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
@@ -225,7 +225,7 @@ class CauchyLoss : public LossFunction {
// rho(s) = a atan(s / a).
//
// At s = 0: rho = [0, 1, 0].
class ArctanLoss : public LossFunction {
class CERES_EXPORT ArctanLoss : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
virtual void Evaluate(double, double*) const;
@@ -264,7 +264,7 @@ class ArctanLoss : public LossFunction {
// concentrated in the range a - b to a + b.
//
// At s = 0: rho = [0, ~0, ~0].
class TolerantLoss : public LossFunction {
class CERES_EXPORT TolerantLoss : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
virtual void Evaluate(double, double*) const;
@@ -305,7 +305,7 @@ class ComposedLoss : public LossFunction {
// function, rho = NULL is a valid input and will result in the input
// being scaled by a. This provides a simple way of implementing a
// scaled ResidualBlock.
class ScaledLoss : public LossFunction {
class CERES_EXPORT ScaledLoss : public LossFunction {
public:
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
@@ -362,7 +362,7 @@ class ScaledLoss : public LossFunction {
//
// Solve(options, &problem, &summary)
//
class LossFunctionWrapper : public LossFunction {
class CERES_EXPORT LossFunctionWrapper : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
: rho_(rho), ownership_(ownership) {
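As a usage note (a sketch, assuming problem, cost_function and x from the surrounding examples): the stock losses exported above are attached per residual block, and under the default ownership settings the problem takes ownership of the loss object.
    problem.AddResidualBlock(cost_function,
                             new ceres::HuberLoss(1.0),  // rho with scale a = 1.0
                             x);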

View File

@@ -56,7 +56,7 @@ namespace ceres {
// which would be the case if the covariance matrix S is rank
// deficient.
class NormalPrior: public CostFunction {
class CERES_EXPORT NormalPrior: public CostFunction {
public:
// Check that the number of rows in the vector b are the same as the
// number of columns in the matrix A, crash otherwise.

View File

@@ -117,14 +117,14 @@ typedef internal::ResidualBlock* ResidualBlockId;
// problem.AddResidualBlock(new MyBinaryCostFunction(...), x2, x3);
//
// Please see cost_function.h for details of the CostFunction object.
class Problem {
class CERES_EXPORT Problem {
public:
struct Options {
struct CERES_EXPORT Options {
Options()
: cost_function_ownership(TAKE_OWNERSHIP),
loss_function_ownership(TAKE_OWNERSHIP),
local_parameterization_ownership(TAKE_OWNERSHIP),
enable_fast_parameter_block_removal(false),
enable_fast_removal(false),
disable_all_safety_checks(false) {}
// These flags control whether the Problem object owns the cost
@@ -138,17 +138,21 @@ class Problem {
Ownership loss_function_ownership;
Ownership local_parameterization_ownership;
// If true, trades memory for a faster RemoveParameterBlock() operation.
// If true, trades memory for faster RemoveResidualBlock() and
// RemoveParameterBlock() operations.
//
// RemoveParameterBlock() takes time proportional to the size of the entire
// Problem. If you only remove parameter blocks from the Problem
// occasionally, this may be acceptable. However, if you are modifying the
// Problem frequently, and have memory to spare, then flip this switch to
// By default, RemoveParameterBlock() and RemoveResidualBlock() take time
// proportional to the size of the entire problem. If you only ever remove
// parameters or residuals from the problem occasionally, this might be
// acceptable. However, if you have memory to spare, enable this option to
// make RemoveParameterBlock() take time proportional to the number of
// residual blocks that depend on it. The increase in memory usage is an
// additional hash set per parameter block containing all the residuals that
// depend on the parameter block.
bool enable_fast_parameter_block_removal;
// residual blocks that depend on it, and RemoveResidualBlock() take (on
// average) constant time.
//
// The increase in memory usage is twofold: an additional hash set per
// parameter block containing all the residuals that depend on the parameter
// block; and a hash set in the problem containing all residuals.
bool enable_fast_removal;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
@@ -276,7 +280,7 @@ class Problem {
// residual blocks that depend on the parameter are also removed, as
// described above in RemoveResidualBlock().
//
// If Problem::Options::enable_fast_parameter_block_removal is true, then the
// If Problem::Options::enable_fast_removal is true, then the
// removal is fast (almost constant time). Otherwise, removing a parameter
// block will incur a scan of the entire Problem object.
//
@@ -300,7 +304,7 @@ class Problem {
// Hold the indicated parameter block constant during optimization.
void SetParameterBlockConstant(double* values);
// Allow the indicated parameter to vary during optimization.
// Allow the indicated parameter block to vary during optimization.
void SetParameterBlockVariable(double* values);
// Set the local parameterization for one of the parameter blocks.
@@ -312,6 +316,15 @@ class Problem {
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
// Get the local parameterization object associated with this
// parameter block. If there is no parameterization object
// associated then NULL is returned.
const LocalParameterization* GetParameterization(double* values) const;
// Set the lower/upper bound for the parameter with position "index".
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
// Number of parameter blocks in the problem. Always equals
// parameter_blocks().size() and parameter_block_sizes().size().
int NumParameterBlocks() const;
@@ -336,6 +349,9 @@ class Problem {
// block, then ParameterBlockLocalSize = ParameterBlockSize.
int ParameterBlockLocalSize(const double* values) const;
// Is the given parameter block present in this problem or not?
bool HasParameterBlock(const double* values) const;
// Fills the passed parameter_blocks vector with pointers to the
// parameter blocks currently in the problem. After this call,
// parameter_block.size() == NumParameterBlocks.
@@ -353,7 +369,7 @@ class Problem {
// Get all the residual blocks that depend on the given parameter block.
//
// If Problem::Options::enable_fast_parameter_block_removal is true, then
// If Problem::Options::enable_fast_removal is true, then
// getting the residual blocks is fast and depends only on the number of
// residual blocks. Otherwise, getting the residual blocks for a parameter
// block will incur a scan of the entire Problem object.
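Taken together, the renamed option and the new bound setters are used roughly as follows. A sketch; the bounds are enforced by the trust region minimizer, so treat that restriction as an assumption here:
    ceres::Problem::Options options;
    options.enable_fast_removal = true;  // was enable_fast_parameter_block_removal

    ceres::Problem problem(options);
    double x[3] = {1.0, 2.0, 3.0};
    problem.AddParameterBlock(x, 3);

    // Constrain the first coordinate of the block to [0, 10].
    problem.SetParameterLowerBound(x, 0, 0.0);
    problem.SetParameterUpperBound(x, 0, 10.0);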

View File

@@ -46,7 +46,7 @@ namespace ceres {
class Problem;
// Interface for non-linear least squares solvers.
class Solver {
class CERES_EXPORT Solver {
public:
virtual ~Solver();
@@ -55,7 +55,7 @@ class Solver {
// problems; however, better performance is often obtainable with tweaking.
//
// The constants are defined inside types.h
struct Options {
struct CERES_EXPORT Options {
// Default constructor that sets up a generic sparse problem.
Options() {
minimizer_type = TRUST_REGION;
@@ -107,15 +107,14 @@ class Solver {
num_linear_solver_threads = 1;
linear_solver_ordering = NULL;
use_postordering = false;
dynamic_sparsity = false;
min_linear_solver_iterations = 1;
max_linear_solver_iterations = 500;
eta = 1e-1;
jacobi_scaling = true;
use_inner_iterations = false;
inner_iteration_tolerance = 1e-3;
inner_iteration_ordering = NULL;
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
trust_region_problem_dump_directory = "/tmp";
@@ -126,7 +125,6 @@ class Solver {
update_state_every_iteration = false;
}
~Options();
// Minimizer options ----------------------------------------
// Ceres supports the two major families of optimization strategies -
@@ -367,7 +365,7 @@ class Solver {
// Minimizer terminates when
//
// max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
// max_i |x - Project(Plus(x, -g(x)))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
double gradient_tolerance;
@@ -480,10 +478,7 @@ class Solver {
// the parameter blocks into two groups, one for the points and one
// for the cameras, where the group containing the points has an id
// smaller than the group containing cameras.
//
// Once assigned, Solver::Options owns this pointer and will
// deallocate the memory when destroyed.
ParameterBlockOrdering* linear_solver_ordering;
shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
// Sparse Cholesky factorization algorithms use a fill-reducing
// ordering to permute the columns of the Jacobian matrix. There
@@ -506,6 +501,21 @@ class Solver {
// matrix. Setting use_postordering to true enables this tradeoff.
bool use_postordering;
// Some non-linear least squares problems are symbolically dense but
// numerically sparse, i.e. at any given state only a small number
// of jacobian entries are non-zero, but the position and number of
// non-zeros is different depending on the state. For these problems
// it can be useful to factorize the sparse jacobian at each solver
// iteration instead of including all of the zero entries in a single
// general factorization.
//
// If your problem does not have this property (or you do not know),
// then it is probably best to keep this false, otherwise it will
// likely lead to worse performance.
// This setting affects the SPARSE_NORMAL_CHOLESKY solver.
bool dynamic_sparsity;
// Some non-linear least squares problems have additional
// structure in the way the parameter blocks interact that it is
// beneficial to modify the way the trust region step is computed.
@@ -576,7 +586,7 @@ class Solver {
// the lower numbered groups are optimized before the higher
// number groups. Each group must be an independent set. Not
// all parameter blocks need to be present in the ordering.
ParameterBlockOrdering* inner_iteration_ordering;
shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
// Generally speaking, inner iterations make significant progress
// in the early stages of the solve and then their contribution
@@ -703,7 +713,7 @@ class Solver {
string solver_log;
};
struct Summary {
struct CERES_EXPORT Summary {
Summary();
// A brief one line description of the state of the solver after
@@ -941,7 +951,7 @@ class Solver {
};
// Helper function which avoids going through the interface.
void Solve(const Solver::Options& options,
CERES_EXPORT void Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary);
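A sketch of setting up the reworked options (problem and x as in the Problem example above; ParameterBlockOrdering comes from ordered_groups.h):
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    options.dynamic_sparsity = true;  // only affects SPARSE_NORMAL_CHOLESKY

    // Options now shares ownership of the ordering rather than deleting a raw pointer.
    ceres::ParameterBlockOrdering* ordering = new ceres::ParameterBlockOrdering;
    ordering->AddElementToGroup(x, 0);
    options.linear_solver_ordering.reset(ordering);

    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);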

View File

@@ -45,7 +45,6 @@ namespace ceres {
// Basic integer types. These typedefs are in the Ceres namespace to avoid
// conflicts with other packages having similar typedefs.
typedef short int16;
typedef int int32;
// Argument type used in interfaces that can optionally take ownership
@@ -306,7 +305,7 @@ enum TerminationType {
// by the user was satisfied.
//
// 1. (new_cost - old_cost) < function_tolerance * old_cost;
// 2. max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
// 2. max_i |gradient_i| < gradient_tolerance
// 3. |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
//
// The user's parameter blocks will be updated with the solution.
@@ -379,9 +378,9 @@ enum DumpFormatType {
TEXTFILE
};
// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
// the number of residuals. If specified, then the number of residuals for that
// cost function can vary at runtime.
// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be
// specified for the number of residuals. If specified, then the
// number of residuals for that cost function can vary at runtime.
enum DimensionType {
DYNAMIC = -1
};
@@ -403,69 +402,75 @@ enum CovarianceAlgorithmType {
SPARSE_QR
};
const char* LinearSolverTypeToString(LinearSolverType type);
bool StringToLinearSolverType(string value, LinearSolverType* type);
CERES_EXPORT const char* LinearSolverTypeToString(
LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(string value,
LinearSolverType* type);
const char* PreconditionerTypeToString(PreconditionerType type);
bool StringToPreconditionerType(string value, PreconditionerType* type);
CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type);
CERES_EXPORT bool StringToPreconditionerType(string value,
PreconditionerType* type);
const char* VisibilityClusteringTypeToString(VisibilityClusteringType type);
bool StringToVisibilityClusteringType(string value,
CERES_EXPORT const char* VisibilityClusteringTypeToString(
VisibilityClusteringType type);
CERES_EXPORT bool StringToVisibilityClusteringType(string value,
VisibilityClusteringType* type);
const char* SparseLinearAlgebraLibraryTypeToString(
CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
bool StringToSparseLinearAlgebraLibraryType(
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
string value,
SparseLinearAlgebraLibraryType* type);
const char* DenseLinearAlgebraLibraryTypeToString(
CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
DenseLinearAlgebraLibraryType type);
bool StringToDenseLinearAlgebraLibraryType(
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
string value,
DenseLinearAlgebraLibraryType* type);
const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
bool StringToTrustRegionStrategyType(string value,
CERES_EXPORT const char* TrustRegionStrategyTypeToString(
TrustRegionStrategyType type);
CERES_EXPORT bool StringToTrustRegionStrategyType(string value,
TrustRegionStrategyType* type);
const char* DoglegTypeToString(DoglegType type);
bool StringToDoglegType(string value, DoglegType* type);
CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
CERES_EXPORT bool StringToDoglegType(string value, DoglegType* type);
const char* MinimizerTypeToString(MinimizerType type);
bool StringToMinimizerType(string value, MinimizerType* type);
CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
CERES_EXPORT bool StringToMinimizerType(string value, MinimizerType* type);
const char* LineSearchDirectionTypeToString(LineSearchDirectionType type);
bool StringToLineSearchDirectionType(string value,
CERES_EXPORT const char* LineSearchDirectionTypeToString(
LineSearchDirectionType type);
CERES_EXPORT bool StringToLineSearchDirectionType(string value,
LineSearchDirectionType* type);
const char* LineSearchTypeToString(LineSearchType type);
bool StringToLineSearchType(string value, LineSearchType* type);
CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
CERES_EXPORT bool StringToLineSearchType(string value, LineSearchType* type);
const char* NonlinearConjugateGradientTypeToString(
CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
bool StringToNonlinearConjugateGradientType(
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
string value,
NonlinearConjugateGradientType* type);
const char* LineSearchInterpolationTypeToString(
CERES_EXPORT const char* LineSearchInterpolationTypeToString(
LineSearchInterpolationType type);
bool StringToLineSearchInterpolationType(
CERES_EXPORT bool StringToLineSearchInterpolationType(
string value,
LineSearchInterpolationType* type);
const char* CovarianceAlgorithmTypeToString(
CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type);
bool StringToCovarianceAlgorithmType(
CERES_EXPORT bool StringToCovarianceAlgorithmType(
string value,
CovarianceAlgorithmType* type);
const char* TerminationTypeToString(TerminationType type);
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
bool IsSchurType(LinearSolverType type);
bool IsSparseLinearAlgebraLibraryTypeAvailable(
CERES_EXPORT bool IsSchurType(LinearSolverType type);
CERES_EXPORT bool IsSparseLinearAlgebraLibraryTypeAvailable(
SparseLinearAlgebraLibraryType type);
bool IsDenseLinearAlgebraLibraryTypeAvailable(
CERES_EXPORT bool IsDenseLinearAlgebraLibraryTypeAvailable(
DenseLinearAlgebraLibraryType type);
} // namespace ceres
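The string/enum helpers that gain CERES_EXPORT above are the ones usually driven from configuration code; a small sketch:
    ceres::LinearSolverType solver_type;
    if (ceres::StringToLinearSolverType("SPARSE_NORMAL_CHOLESKY", &solver_type)) {
      std::printf("using %s\n", ceres::LinearSolverTypeToString(solver_type));
    }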

View File

@@ -0,0 +1,287 @@
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
# http://code.google.com/p/ceres-solver/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: keir@google.com (Keir Mierle)
SET(CERES_INTERNAL_SRC
array_utils.cc
blas.cc
block_evaluate_preparer.cc
block_jacobi_preconditioner.cc
block_jacobian_writer.cc
block_random_access_dense_matrix.cc
block_random_access_diagonal_matrix.cc
block_random_access_matrix.cc
block_random_access_sparse_matrix.cc
block_sparse_matrix.cc
block_structure.cc
c_api.cc
canonical_views_clustering.cc
cgnr_solver.cc
compressed_col_sparse_matrix_utils.cc
compressed_row_jacobian_writer.cc
compressed_row_sparse_matrix.cc
conditioned_cost_function.cc
conjugate_gradients_solver.cc
coordinate_descent_minimizer.cc
corrector.cc
covariance.cc
covariance_impl.cc
cxsparse.cc
dense_normal_cholesky_solver.cc
dense_qr_solver.cc
dense_sparse_matrix.cc
detect_structure.cc
dogleg_strategy.cc
dynamic_compressed_row_jacobian_writer.cc
dynamic_compressed_row_sparse_matrix.cc
evaluator.cc
file.cc
gradient_checking_cost_function.cc
implicit_schur_complement.cc
incomplete_lq_factorization.cc
iterative_schur_complement_solver.cc
levenberg_marquardt_strategy.cc
lapack.cc
line_search.cc
line_search_direction.cc
line_search_minimizer.cc
linear_least_squares_problems.cc
linear_operator.cc
linear_solver.cc
local_parameterization.cc
loss_function.cc
low_rank_inverse_hessian.cc
minimizer.cc
normal_prior.cc
parameter_block_ordering.cc
partitioned_matrix_view.cc
polynomial.cc
preconditioner.cc
problem.cc
problem_impl.cc
program.cc
residual_block.cc
residual_block_utils.cc
schur_complement_solver.cc
schur_eliminator.cc
schur_jacobi_preconditioner.cc
scratch_evaluate_preparer.cc
single_linkage_clustering.cc
solver.cc
solver_impl.cc
sparse_matrix.cc
sparse_normal_cholesky_solver.cc
split.cc
stringprintf.cc
suitesparse.cc
triplet_sparse_matrix.cc
trust_region_minimizer.cc
trust_region_strategy.cc
types.cc
visibility.cc
visibility_based_preconditioner.cc
wall_time.cc
)
# Heuristic for determining LIB_SUFFIX. FHS recommends that 64-bit systems
# install native libraries to lib64 rather than lib. Most distros seem to
# follow this convention with a couple notable exceptions (Debian-based and
# Arch-based distros) which we try to detect here.
IF (CMAKE_SYSTEM_NAME MATCHES "Linux" AND
NOT DEFINED LIB_SUFFIX AND
NOT CMAKE_CROSSCOMPILING AND
CMAKE_SIZEOF_VOID_P EQUAL "8" AND
NOT EXISTS "/etc/debian_version" AND
NOT EXISTS "/etc/arch-release")
SET(LIB_SUFFIX "64")
ENDIF ()
# Also depend on the header files so that they appear in IDEs.
FILE(GLOB CERES_INTERNAL_HDRS *.h)
# Include the specialized schur solvers.
IF (SCHUR_SPECIALIZATIONS)
FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*.cc)
ELSE (SCHUR_SPECIALIZATIONS)
# Only the fully dynamic solver. The build is much faster this way.
FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*_d_d_d.cc)
ENDIF (SCHUR_SPECIALIZATIONS)
# Primarily for Android, but optionally for others, use the minimal internal
# Glog implementation.
IF (MINIGLOG)
ADD_LIBRARY(miniglog STATIC miniglog/glog/logging.cc)
INSTALL(TARGETS miniglog
EXPORT CeresExport
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib${LIB_SUFFIX}
ARCHIVE DESTINATION lib${LIB_SUFFIX})
ENDIF (MINIGLOG)
SET(CERES_LIBRARY_PUBLIC_DEPENDENCIES ${GLOG_LIBRARIES})
IF (SUITESPARSE AND SUITESPARSE_FOUND)
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${SUITESPARSE_LIBRARIES})
ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
IF (CXSPARSE AND CXSPARSE_FOUND)
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CXSPARSE_LIBRARIES})
ENDIF (CXSPARSE AND CXSPARSE_FOUND)
IF (BLAS_FOUND AND LAPACK_FOUND)
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${LAPACK_LIBRARIES})
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${BLAS_LIBRARIES})
ENDIF (BLAS_FOUND AND LAPACK_FOUND)
IF (OPENMP_FOUND)
IF (NOT MSVC)
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES gomp)
LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CMAKE_THREAD_LIBS_INIT})
ENDIF (NOT MSVC)
ENDIF (OPENMP_FOUND)
SET(CERES_LIBRARY_SOURCE
${CERES_INTERNAL_SRC}
${CERES_INTERNAL_HDRS}
${CERES_INTERNAL_SCHUR_FILES})
ADD_LIBRARY(ceres ${CERES_LIBRARY_SOURCE})
SET_TARGET_PROPERTIES(ceres PROPERTIES
VERSION ${CERES_VERSION}
SOVERSION ${CERES_VERSION_MAJOR}
)
IF (BUILD_SHARED_LIBS)
# When building a shared library, mark all external libraries as
# PRIVATE so they don't show up as a dependency.
TARGET_LINK_LIBRARIES(ceres
LINK_PUBLIC ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
LINK_PRIVATE ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
ELSE (BUILD_SHARED_LIBS)
# When building a static library, all external libraries are
# PUBLIC(default) since the user needs to link to them.
# They will be listed in CeresTargets.cmake.
SET(CERES_LIBRARY_DEPENDENCIES
${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
ENDIF (BUILD_SHARED_LIBS)
INSTALL(TARGETS ceres
EXPORT CeresExport
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib${LIB_SUFFIX}
ARCHIVE DESTINATION lib${LIB_SUFFIX})
IF (BUILD_TESTING AND GFLAGS)
ADD_LIBRARY(gtest gmock_gtest_all.cc gmock_main.cc)
ADD_LIBRARY(test_util
evaluator_test_utils.cc
numeric_diff_test_utils.cc
test_util.cc)
TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES})
TARGET_LINK_LIBRARIES(test_util ceres gtest ${GLOG_LIBRARIES})
MACRO (CERES_TEST NAME)
ADD_EXECUTABLE(${NAME}_test ${NAME}_test.cc)
TARGET_LINK_LIBRARIES(${NAME}_test test_util ceres gtest)
ADD_TEST(NAME ${NAME}_test
COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${NAME}_test
--test_srcdir
${CMAKE_SOURCE_DIR}/data)
ENDMACRO (CERES_TEST)
CERES_TEST(array_utils)
CERES_TEST(autodiff)
CERES_TEST(autodiff_cost_function)
CERES_TEST(autodiff_local_parameterization)
CERES_TEST(block_random_access_dense_matrix)
CERES_TEST(block_random_access_diagonal_matrix)
CERES_TEST(block_random_access_sparse_matrix)
CERES_TEST(block_sparse_matrix)
CERES_TEST(c_api)
CERES_TEST(canonical_views_clustering)
CERES_TEST(compressed_row_sparse_matrix)
CERES_TEST(conditioned_cost_function)
CERES_TEST(corrector)
CERES_TEST(cost_function_to_functor)
CERES_TEST(covariance)
CERES_TEST(dense_sparse_matrix)
CERES_TEST(dynamic_autodiff_cost_function)
CERES_TEST(dynamic_compressed_row_sparse_matrix)
CERES_TEST(dynamic_numeric_diff_cost_function)
CERES_TEST(evaluator)
CERES_TEST(gradient_checker)
CERES_TEST(gradient_checking_cost_function)
CERES_TEST(graph)
CERES_TEST(graph_algorithms)
CERES_TEST(implicit_schur_complement)
CERES_TEST(incomplete_lq_factorization)
CERES_TEST(iterative_schur_complement_solver)
CERES_TEST(jet)
CERES_TEST(levenberg_marquardt_strategy)
CERES_TEST(dogleg_strategy)
CERES_TEST(local_parameterization)
CERES_TEST(loss_function)
CERES_TEST(minimizer)
CERES_TEST(normal_prior)
CERES_TEST(numeric_diff_cost_function)
CERES_TEST(numeric_diff_functor)
CERES_TEST(ordered_groups)
CERES_TEST(parameter_block)
CERES_TEST(parameter_block_ordering)
CERES_TEST(partitioned_matrix_view)
CERES_TEST(polynomial)
CERES_TEST(problem)
CERES_TEST(residual_block)
CERES_TEST(residual_block_utils)
CERES_TEST(rotation)
CERES_TEST(schur_complement_solver)
CERES_TEST(schur_eliminator)
CERES_TEST(single_linkage_clustering)
CERES_TEST(small_blas)
CERES_TEST(solver_impl)
# TODO(sameeragarwal): This test should ultimately be made
# independent of SuiteSparse.
IF (SUITESPARSE AND SUITESPARSE_FOUND)
CERES_TEST(compressed_col_sparse_matrix_utils)
ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
CERES_TEST(symmetric_linear_solver)
CERES_TEST(triplet_sparse_matrix)
CERES_TEST(trust_region_minimizer)
CERES_TEST(unsymmetric_linear_solver)
CERES_TEST(visibility)
CERES_TEST(visibility_based_preconditioner)
# Put the large end to end test last.
CERES_TEST(system)
ENDIF (BUILD_TESTING AND GFLAGS)

View File

@@ -32,7 +32,10 @@
#include <cmath>
#include <cstddef>
#include <string>
#include "ceres/fpclassify.h"
#include "ceres/stringprintf.h"
namespace ceres {
namespace internal {
@@ -55,6 +58,20 @@ bool IsArrayValid(const int size, const double* x) {
return true;
}
int FindInvalidValue(const int size, const double* x) {
if (x == NULL) {
return size;
}
for (int i = 0; i < size; ++i) {
if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
return i;
}
}
return size;
};
void InvalidateArray(const int size, double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
@@ -63,5 +80,19 @@ void InvalidateArray(const int size, double* x) {
}
}
void AppendArrayToString(const int size, const double* x, string* result) {
for (int i = 0; i < size; ++i) {
if (x == NULL) {
StringAppendF(result, "Not Computed ");
} else {
if (x[i] == kImpossibleValue) {
StringAppendF(result, "Uninitialized ");
} else {
StringAppendF(result, "%12g ", x[i]);
}
}
}
}
} // namespace internal
} // namespace ceres

View File

@@ -57,6 +57,14 @@ void InvalidateArray(int size, double* x);
// equal to the "impossible" value used by InvalidateArray.
bool IsArrayValid(int size, const double* x);
// If the array contains an invalid value, return the index for it,
// otherwise return size.
int FindInvalidValue(const int size, const double* x);
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
void AppendArrayToString(const int size, const double* x, string* result);
extern const double kImpossibleValue;
} // namespace internal

View File

@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/blas.h"
#include "ceres/internal/port.h"
#include "glog/logging.h"
extern "C" void dsyrk_(char* uplo,

View File

@@ -1,170 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/block_random_access_crs_matrix.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
BlockRandomAccessCRSMatrix::BlockRandomAccessCRSMatrix(
const vector<int>& blocks,
const set<pair<int, int> >& block_pairs)
: kMaxRowBlocks(10 * 1000 * 1000),
blocks_(blocks) {
CHECK_LT(blocks.size(), kMaxRowBlocks);
col_layout_.resize(blocks_.size(), 0);
row_strides_.resize(blocks_.size(), 0);
// Build the row/column layout vector and count the number of scalar
// rows/columns.
int num_cols = 0;
for (int i = 0; i < blocks_.size(); ++i) {
col_layout_[i] = num_cols;
num_cols += blocks_[i];
}
// Walk the sparsity pattern and count the number of non-zeros.
int num_nonzeros = 0;
for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
it != block_pairs.end();
++it) {
const int row_block_size = blocks_[it->first];
const int col_block_size = blocks_[it->second];
num_nonzeros += row_block_size * col_block_size;
}
VLOG(2) << "Matrix Size [" << num_cols
<< "," << num_cols
<< "] " << num_nonzeros;
crsm_.reset(new CompressedRowSparseMatrix(num_cols, num_cols, num_nonzeros));
int* rows = crsm_->mutable_rows();
int* cols = crsm_->mutable_cols();
double* values = crsm_->mutable_values();
// Iterate over the sparsity pattern and fill the scalar sparsity
// pattern of the underlying compressed sparse row matrix. Along the
// way also fill out the Layout object which will allow random
// access into the CRS Matrix.
set<pair<int, int> >::const_iterator it = block_pairs.begin();
vector<int> col_blocks;
int row_pos = 0;
rows[0] = 0;
while (it != block_pairs.end()) {
// Add entries to layout_ for all the blocks for this row.
col_blocks.clear();
const int row_block_id = it->first;
const int row_block_size = blocks_[row_block_id];
int num_cols = 0;
while ((it != block_pairs.end()) && (it->first == row_block_id)) {
layout_[IntPairToLong(it->first, it->second)] =
new CellInfo(values + num_cols);
col_blocks.push_back(it->second);
num_cols += blocks_[it->second];
++it;
};
// Count the number of non-zeros in the row block.
for (int j = 0; j < row_block_size; ++j) {
rows[row_pos + j + 1] = rows[row_pos + j] + num_cols;
}
// Fill out the sparsity pattern for each row.
int col_pos = 0;
for (int j = 0; j < col_blocks.size(); ++j) {
const int col_block_id = col_blocks[j];
const int col_block_size = blocks_[col_block_id];
for (int r = 0; r < row_block_size; ++r) {
const int column_block_begin = rows[row_pos + r] + col_pos;
for (int c = 0; c < col_block_size; ++c) {
cols[column_block_begin + c] = col_layout_[col_block_id] + c;
}
}
col_pos += col_block_size;
}
row_pos += row_block_size;
values += row_block_size * num_cols;
row_strides_[row_block_id] = num_cols;
}
}
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
BlockRandomAccessCRSMatrix::~BlockRandomAccessCRSMatrix() {
// TODO(sameeragarwal) this should be rationalized going forward and
// perhaps moved into BlockRandomAccessMatrix.
for (LayoutType::iterator it = layout_.begin();
it != layout_.end();
++it) {
delete it->second;
}
}
CellInfo* BlockRandomAccessCRSMatrix::GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride) {
const LayoutType::iterator it =
layout_.find(IntPairToLong(row_block_id, col_block_id));
if (it == layout_.end()) {
return NULL;
}
*row = 0;
*col = 0;
*row_stride = blocks_[row_block_id];
*col_stride = row_strides_[row_block_id];
return it->second;
}
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
void BlockRandomAccessCRSMatrix::SetZero() {
crsm_->SetZero();
}
} // namespace internal
} // namespace ceres

View File

@@ -1,108 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
#include <set>
#include <vector>
#include <utility>
#include "ceres/mutex.h"
#include "ceres/block_random_access_matrix.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/collections_port.h"
#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
// A square BlockRandomAccessMatrix where the underlying storage is a
// compressed row sparse matrix. The matrix need not be symmetric.
class BlockRandomAccessCRSMatrix : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes. block_pairs is a set of
// <row_block_id, col_block_id> pairs to identify the non-zero cells
// of this matrix.
BlockRandomAccessCRSMatrix(const vector<int>& blocks,
const set<pair<int, int> >& block_pairs);
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessCRSMatrix();
// BlockRandomAccessMatrix Interface.
virtual CellInfo* GetCell(int row_block_id,
int col_block_id,
int* row,
int* col,
int* row_stride,
int* col_stride);
// This is not a thread safe method, it assumes that no cell is
// locked.
virtual void SetZero();
// Since the matrix is square, num_rows() == num_cols().
virtual int num_rows() const { return crsm_->num_rows(); }
virtual int num_cols() const { return crsm_->num_cols(); }
// Access to the underlying matrix object.
const CompressedRowSparseMatrix* matrix() const { return crsm_.get(); }
CompressedRowSparseMatrix* mutable_matrix() { return crsm_.get(); }
private:
int64 IntPairToLong(int a, int b) {
return a * kMaxRowBlocks + b;
}
const int64 kMaxRowBlocks;
// row/column block sizes.
const vector<int> blocks_;
vector<int> col_layout_;
vector<int> row_strides_;
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
typedef HashMap<long int, CellInfo* > LayoutType;
LayoutType layout_;
scoped_ptr<CompressedRowSparseMatrix> crsm_;
friend class BlockRandomAccessCRSMatrixTest;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessCRSMatrix);
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
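As a side note on the layout_ key above, a small self-contained illustration (plain C++, not Ceres code) of why the a * kMaxRowBlocks + b packing is collision-free as long as every column block id stays below kMaxRowBlocks:

#include <cassert>
#include <cstdint>

// Illustrative only: pack a (row_block_id, col_block_id) pair into one key.
int64_t IntPairToKey(int64_t max_row_blocks, int a, int b) {
  assert(b < max_row_blocks);       // required for uniqueness
  return a * max_row_blocks + b;    // decode: row = key / max, col = key % max
}

int main() {
  const int64_t kMax = 10;
  assert(IntPairToKey(kMax, 2, 3) == 23);
  assert(IntPairToKey(kMax, 3, 2) == 32);  // distinct from (2, 3)
  return 0;
}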

View File

@@ -34,6 +34,9 @@ namespace ceres {
namespace internal {
bool CellLessThan(const Cell& lhs, const Cell& rhs) {
if (lhs.block_id == rhs.block_id) {
return (lhs.position < rhs.position);
}
return (lhs.block_id < rhs.block_id);
}
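For context (an illustration, not part of the diff), this comparator is the kind of predicate handed to std::sort to keep the cells of a compressed row ordered first by block id and then by position; CompressedRow and its cells member are assumed from block_structure.h.

#include <algorithm>
#include "ceres/block_structure.h"

// Hedged illustration: order the cells of one compressed row.
void SortRowCells(ceres::internal::CompressedRow* row) {
  std::sort(row->cells.begin(), row->cells.end(),
            ceres::internal::CellLessThan);
}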

View File

@@ -45,8 +45,6 @@
namespace ceres {
namespace internal {
class BlockStructureProto;
typedef int32 BlockSize;
struct Block {
@@ -89,16 +87,6 @@ struct CompressedColumnBlockStructure {
vector<CompressedColumn> cols;
};
// Deserialize the given block structure proto to the given block structure.
// Destroys previous contents of block_structure.
void ProtoToBlockStructure(const BlockStructureProto &proto,
CompressedRowBlockStructure *block_structure);
// Serialize the given block structure to the given proto. Destroys previous
// contents of proto.
void BlockStructureToProto(const CompressedRowBlockStructure &block_structure,
BlockStructureProto *proto);
} // namespace internal
} // namespace ceres

View File

@@ -29,6 +29,9 @@
// Author: David Gallup (dgallup@google.com)
// Sameer Agarwal (sameeragarwal@google.com)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/canonical_views_clustering.h"

View File

@@ -41,6 +41,9 @@
#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include <vector>

View File

@@ -33,6 +33,8 @@
#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
#define CERES_INTERNAL_COLLECTIONS_PORT_H_
#include "ceres/internal/port.h"
#if defined(CERES_NO_UNORDERED_MAP)
# include <map>
# include <set>

View File

@@ -40,6 +40,44 @@
namespace ceres {
namespace internal {
void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
const Program* program, CompressedRowSparseMatrix* jacobian) {
const vector<ParameterBlock*>& parameter_blocks =
program->parameter_blocks();
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {
col_blocks[i] = parameter_blocks[i]->LocalSize();
}
const vector<ResidualBlock*>& residual_blocks =
program->residual_blocks();
vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
row_blocks.resize(residual_blocks.size());
for (int i = 0; i < residual_blocks.size(); ++i) {
row_blocks[i] = residual_blocks[i]->NumResiduals();
}
}
void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
const Program* program,
int residual_id,
vector<pair<int, int> >* evaluated_jacobian_blocks) {
const ResidualBlock* residual_block =
program->residual_blocks()[residual_id];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
const ParameterBlock* parameter_block =
residual_block->parameter_blocks()[j];
if (!parameter_block->IsConstant()) {
evaluated_jacobian_blocks->push_back(
make_pair(parameter_block->index(), j));
}
}
sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
}
SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
const vector<ResidualBlock*>& residual_blocks =
program_->residual_blocks();
@@ -71,7 +109,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
total_num_effective_parameters,
num_jacobian_nonzeros + total_num_effective_parameters);
// At this stage, the CompressedSparseMatrix is an invalid state. But this
// At this stage, the CompressedRowSparseMatrix is an invalid state. But this
// seems to be the only way to construct it without doing a memory copy.
int* rows = jacobian->mutable_rows();
int* cols = jacobian->mutable_cols();
@@ -132,22 +170,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
}
CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
// Populate the row and column block vectors for use by block
// oriented ordering algorithms. This is useful when
// Solver::Options::use_block_amd = true.
const vector<ParameterBlock*>& parameter_blocks =
program_->parameter_blocks();
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {
col_blocks[i] = parameter_blocks[i]->LocalSize();
}
vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
row_blocks.resize(residual_blocks.size());
for (int i = 0; i < residual_blocks.size(); ++i) {
row_blocks[i] = residual_blocks[i]->NumResiduals();
}
PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
return jacobian;
}
@@ -164,25 +187,10 @@ void CompressedRowJacobianWriter::Write(int residual_id,
const ResidualBlock* residual_block =
program_->residual_blocks()[residual_id];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
const int num_residuals = residual_block->NumResiduals();
// It is necessary to determine the order of the jacobian blocks before
// copying them into the CompressedRowSparseMatrix. Just because a cost
// function uses parameter blocks 1 after 2 in its arguments does not mean
// that the block 1 occurs before block 2 in the column layout of the
// jacobian. Thus, determine the order by sorting the jacobian blocks by their
// position in the state vector.
vector<pair<int, int> > evaluated_jacobian_blocks;
for (int j = 0; j < num_parameter_blocks; ++j) {
const ParameterBlock* parameter_block =
residual_block->parameter_blocks()[j];
if (!parameter_block->IsConstant()) {
evaluated_jacobian_blocks.push_back(
make_pair(parameter_block->index(), j));
}
}
sort(evaluated_jacobian_blocks.begin(), evaluated_jacobian_blocks.end());
GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
// Where in the current row does the jacobian for a parameter block begin.
int col_pos = 0;

View File

@@ -39,6 +39,7 @@
namespace ceres {
namespace internal {
class CompressedRowSparseMatrix;
class Program;
class SparseMatrix;
@@ -49,11 +50,44 @@ class CompressedRowJacobianWriter {
: program_(program) {
}
// PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
// row_blocks for a CompressedRowSparseMatrix, based on the
// parameter block sizes and residual sizes respectively from the
// program. This is useful when Solver::Options::use_block_amd =
// true;
//
// This function is static so that it is available to other jacobian
// writers which use CompressedRowSparseMatrix (or derived types).
// (Jacobian writers do not fall under any type hierarchy; they only
// have to provide an interface as specified in program_evaluator.h).
static void PopulateJacobianRowAndColumnBlockVectors(
const Program* program,
CompressedRowSparseMatrix* jacobian);
// It is necessary to determine the order of the jacobian blocks
// before copying them into a CompressedRowSparseMatrix (or derived
// type). Just because a cost function uses parameter blocks 1
// after 2 in its arguments does not mean that the block 1 occurs
// before block 2 in the column layout of the jacobian. Thus,
// GetOrderedParameterBlocks determines the order by sorting the
// jacobian blocks by their position in the state vector.
//
// This function is static so that it is available to other jacobian
// writers which use CompressedRowSparseMatrix (or derived types).
// (Jacobian writers do not fall under any type hierarchy; they only
// have to provide an interface as specified in
// program_evaluator.h).
static void GetOrderedParameterBlocks(
const Program* program,
int residual_id,
vector<pair<int, int> >* evaluated_jacobian_blocks);
// JacobianWriter interface.
// Since the compressed row matrix has different layout than that assumed by
// the cost functions, use scratch space to store the jacobians temporarily
// then copy them over to the larger jacobian in the Write() function.
// Since the compressed row matrix has different layout than that
// assumed by the cost functions, use scratch space to store the
// jacobians temporarily then copy them over to the larger jacobian
// in the Write() function.
ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
return ScratchEvaluatePreparer::Create(*program_, num_threads);
}

View File

@@ -286,6 +286,13 @@ void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
matrix->values.resize(matrix->rows[matrix->num_rows]);
}
void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
CHECK_GE(num_nonzeros, 0);
cols_.resize(num_nonzeros);
values_.resize(num_nonzeros);
}
void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
double* solution) const {
for (int r = 0; r < num_rows_; ++r) {
@@ -377,6 +384,9 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
}
transpose_rows[0] = 0;
*(transpose->mutable_row_blocks()) = col_blocks_;
*(transpose->mutable_col_blocks()) = row_blocks_;
return transpose;
}

View File

@@ -115,6 +115,9 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const vector<int>& col_blocks() const { return col_blocks_; }
vector<int>* mutable_col_blocks() { return &col_blocks_; }
// Destructive array resizing method.
void SetMaxNumNonZeros(int num_nonzeros);
// Non-destructive array resizing method.
void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
void set_num_cols(const int num_cols) { num_cols_ = num_cols; }

View File

@@ -28,6 +28,9 @@
//
// Author: strandmark@google.com (Petter Strandmark)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_CXSPARSE
#include "ceres/cxsparse.h"

View File

@@ -31,6 +31,9 @@
#ifndef CERES_INTERNAL_CXSPARSE_H_
#define CERES_INTERNAL_CXSPARSE_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_CXSPARSE
#include <vector>

View File

@@ -0,0 +1,51 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
#ifndef CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
#define CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
#include "ceres/casts.h"
#include "ceres/dynamic_compressed_row_sparse_matrix.h"
namespace ceres {
namespace internal {
struct DynamicCompressedRowJacobianFinalizer {
void operator()(SparseMatrix* base_jacobian, int num_parameters) {
DynamicCompressedRowSparseMatrix* jacobian =
down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
jacobian->Finalize(num_parameters);
}
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
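A hedged sketch of where this functor is meant to fit: the templated ProgramEvaluator (declared in program_evaluator.h, which is not part of this diff) presumably invokes the finalizer once all residual blocks have been written. Every name below is illustrative.

// Illustrative fragment only; the real call site lives in program_evaluator.h.
ceres::internal::DynamicCompressedRowJacobianFinalizer finalize;
ceres::internal::SparseMatrix* jacobian = jacobian_writer.CreateJacobian();
// ... Write() runs once per residual block, inserting non-zero entries ...
finalize(jacobian, num_parameters);  // flattens the dynamic rows into valid CRS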

View File

@@ -0,0 +1,107 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
#include "ceres/compressed_row_jacobian_writer.h"
#include "ceres/dynamic_compressed_row_jacobian_writer.h"
#include "ceres/casts.h"
#include "ceres/dynamic_compressed_row_sparse_matrix.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
namespace ceres {
namespace internal {
ScratchEvaluatePreparer*
DynamicCompressedRowJacobianWriter::CreateEvaluatePreparers(int num_threads) {
return ScratchEvaluatePreparer::Create(*program_, num_threads);
}
SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
// Initialize `jacobian` with `max_num_nonzeros` set to zero.
const int num_residuals = program_->NumResiduals();
const int num_effective_parameters = program_->NumEffectiveParameters();
DynamicCompressedRowSparseMatrix* jacobian =
new DynamicCompressedRowSparseMatrix(num_residuals,
num_effective_parameters,
0);
CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
program_, jacobian);
return jacobian;
}
void DynamicCompressedRowJacobianWriter::Write(int residual_id,
int residual_offset,
double **jacobians,
SparseMatrix* base_jacobian) {
DynamicCompressedRowSparseMatrix* jacobian =
down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
// Get the `residual_block` of interest.
const ResidualBlock* residual_block =
program_->residual_blocks()[residual_id];
const int num_residuals = residual_block->NumResiduals();
vector<pair<int, int> > evaluated_jacobian_blocks;
CompressedRowJacobianWriter::GetOrderedParameterBlocks(
program_, residual_id, &evaluated_jacobian_blocks);
// `residual_offset` is the residual row in the global jacobian.
// Empty the jacobian rows.
jacobian->ClearRows(residual_offset, num_residuals);
// Iterate over each parameter block.
for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
const ParameterBlock* parameter_block =
program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
const int parameter_block_jacobian_index =
evaluated_jacobian_blocks[i].second;
const int parameter_block_size = parameter_block->LocalSize();
// For each parameter block only insert its non-zero entries.
for (int r = 0; r < num_residuals; ++r) {
for (int c = 0; c < parameter_block_size; ++c) {
const double& v = jacobians[parameter_block_jacobian_index][
r * parameter_block_size + c];
// Only insert non-zero entries.
if (v != 0.0) {
jacobian->InsertEntry(
residual_offset + r, parameter_block->delta_offset() + c, v);
}
}
}
}
}
} // namespace internal
} // namespace ceres

View File

@@ -0,0 +1,83 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
//
// A jacobian writer that directly writes to dynamic compressed row sparse
// matrices.
#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
#include "ceres/evaluator.h"
#include "ceres/scratch_evaluate_preparer.h"
namespace ceres {
namespace internal {
class Program;
class SparseMatrix;
class DynamicCompressedRowJacobianWriter {
public:
DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
Program* program)
: program_(program) {
}
// JacobianWriter interface.
// The compressed row matrix has different layout than that assumed by
// the cost functions. The scratch space is therefore used to store
// the jacobians (including zeros) temporarily before only the non-zero
// entries are copied over to the larger jacobian in `Write`.
ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
// Return a `DynamicCompressedRowSparseMatrix` which is filled by
// `Write`. Note that `Finalize` must be called to make the
// `CompressedRowSparseMatrix` interface valid.
SparseMatrix* CreateJacobian() const;
// Write only the non-zero jacobian entries for a residual block
// (specified by `residual_id`) into `base_jacobian`, starting at the row
// specified by `residual_offset`.
//
// This method is thread-safe over residual blocks (each `residual_id`).
void Write(int residual_id,
int residual_offset,
double **jacobians,
SparseMatrix* base_jacobian);
private:
Program* program_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_

View File

@@ -0,0 +1,107 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
#include <cstring>
#include "ceres/dynamic_compressed_row_sparse_matrix.h"
namespace ceres {
namespace internal {
DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
int num_rows,
int num_cols,
int initial_max_num_nonzeros)
: CompressedRowSparseMatrix(num_rows,
num_cols,
initial_max_num_nonzeros) {
dynamic_cols_.resize(num_rows);
dynamic_values_.resize(num_rows);
}
void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
int col,
const double& value) {
CHECK_GE(row, 0);
CHECK_LT(row, num_rows());
CHECK_GE(col, 0);
CHECK_LT(col, num_cols());
dynamic_cols_[row].push_back(col);
dynamic_values_[row].push_back(value);
}
void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
int num_rows) {
for (int r = 0; r < num_rows; ++r) {
const int i = row_start + r;
CHECK_GE(i, 0);
CHECK_LT(i, this->num_rows());
dynamic_cols_[i].resize(0);
dynamic_values_[i].resize(0);
}
}
void DynamicCompressedRowSparseMatrix::Finalize(int num_additional_elements) {
// `num_additional_elements` is provided as an argument so that additional
// storage can be reserved when it is known by the finalizer.
CHECK_GE(num_additional_elements, 0);
// Count the number of non-zeros and resize `cols_` and `values_`.
int num_jacobian_nonzeros = 0;
for (int i = 0; i < dynamic_cols_.size(); ++i) {
num_jacobian_nonzeros += dynamic_cols_[i].size();
}
SetMaxNumNonZeros(num_jacobian_nonzeros + num_additional_elements);
// Flatten `dynamic_cols_` into `cols_` and `dynamic_values_`
// into `values_`.
int index_into_values_and_cols = 0;
for (int i = 0; i < num_rows(); ++i) {
mutable_rows()[i] = index_into_values_and_cols;
const int num_nonzero_columns = dynamic_cols_[i].size();
if (num_nonzero_columns > 0) {
memcpy(mutable_cols() + index_into_values_and_cols,
&dynamic_cols_[i][0],
dynamic_cols_[i].size() * sizeof(dynamic_cols_[0][0]));
memcpy(mutable_values() + index_into_values_and_cols,
&dynamic_values_[i][0],
dynamic_values_[i].size() * sizeof(dynamic_values_[0][0]));
index_into_values_and_cols += dynamic_cols_[i].size();
}
}
mutable_rows()[num_rows()] = index_into_values_and_cols;
CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
<< "Ceres bug: final index into values_ and cols_ should be equal to "
<< "the number of jacobian nonzeros. Please contact the developers!";
}
} // namespace internal
} // namespace ceres
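To make the flattening above concrete, a small worked example (not from the source): a 3-row matrix where entries are inserted only into rows 0 and 2, then finalized with num_additional_elements = 0.

// Hedged illustration of the arrays produced by Finalize(0) after
//   InsertEntry(0, 2, 1.0); InsertEntry(2, 0, 2.0); InsertEntry(2, 3, 3.0);
//
//   rows()   -> [0, 1, 1, 3]    // per-row start offsets; empty row 1 repeats 1
//   cols()   -> [2, 0, 3]
//   values() -> [1.0, 2.0, 3.0]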

View File

@@ -0,0 +1,99 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
//
// A compressed row sparse matrix that provides an extended interface to
// allow dynamic insertion of entries. This is provided for the use case
// where the sparsity structure and the number of non-zero entries are dynamic.
// This flexibility is achieved by using an (internal) scratch space that
// allows independent insertion of entries into each row (thread-safe).
// Once insertion is complete, the `Finalize` method must be called to ensure
// that the underlying `CompressedRowSparseMatrix` is consistent.
//
// This should only be used if you really do need a dynamic sparsity pattern.
#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include "ceres/compressed_row_sparse_matrix.h"
namespace ceres {
namespace internal {
class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
public:
// Set the number of rows and columns for the underlying
// `CompressedRowSparseMatrix` and set the initial number of maximum non-zero
// entries. Note that following the insertion of entries, when `Finalize`
// is called, the number of non-zeros is determined and all internal
// structures are adjusted as required. If you know the upper limit on the
// number of non-zeros, then passing this value here can prevent future
// memory reallocations, which may improve performance. Otherwise, if no
// upper limit is available, a value of 0 is sufficient.
//
// Typical usage of this class is to define a new instance with a given
// number of rows, columns and maximum number of non-zero elements
// (if available). Next, entries are inserted at row and column positions
// using `InsertEntry`. Finally, once all elements have been inserted,
// `Finalize` must be called to make the underlying
// `CompressedRowSparseMatrix` consistent.
DynamicCompressedRowSparseMatrix(int num_rows,
int num_cols,
int initial_max_num_nonzeros);
// Insert an entry at a given row and column position. This method is
// thread-safe across rows i.e. different threads can insert values
// simultaneously into different rows. It should be emphasised that this
// method always inserts a new entry and does not check for existing
// entries at the specified row and column position. Duplicate entries
// for a given row and column position will result in undefined
// behavior.
void InsertEntry(int row, int col, const double& value);
// Clear all entries for rows, starting from row index `row_start`
// and proceeding for `num_rows`.
void ClearRows(int row_start, int num_rows);
// Make the underlying internal `CompressedRowSparseMatrix` data structures
// consistent. Additional space for non-zero entries in the
// `CompressedRowSparseMatrix` can be reserved by specifying
// `num_additional_elements`. This is useful when it is known that rows will
// be appended to the `CompressedRowSparseMatrix` (e.g. appending a diagonal
// matrix to the jacobian) as it prevents the need for future reallocation.
void Finalize(int num_additional_elements);
private:
vector<vector<int> > dynamic_cols_;
vector<vector<double> > dynamic_values_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
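Following the "typical usage" description in the comment above, a minimal sketch that assumes only the interface declared in this header:

#include "ceres/dynamic_compressed_row_sparse_matrix.h"

using ceres::internal::DynamicCompressedRowSparseMatrix;

// 3x4 matrix with no a-priori bound on the number of non-zeros.
DynamicCompressedRowSparseMatrix jacobian(3, 4, 0);
jacobian.InsertEntry(0, 2, 1.0);   // thread-safe across different rows
jacobian.InsertEntry(2, 0, 2.0);
jacobian.InsertEntry(2, 3, 3.0);
jacobian.Finalize(0);              // now a consistent CompressedRowSparseMatrix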

View File

@@ -35,6 +35,8 @@
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/crs_matrix.h"
#include "ceres/dense_jacobian_writer.h"
#include "ceres/dynamic_compressed_row_finalizer.h"
#include "ceres/dynamic_compressed_row_jacobian_writer.h"
#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
#include "ceres/program_evaluator.h"
@@ -63,9 +65,17 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
BlockJacobianWriter>(options,
program);
case SPARSE_NORMAL_CHOLESKY:
return new ProgramEvaluator<ScratchEvaluatePreparer,
CompressedRowJacobianWriter>(options,
program);
if (options.dynamic_sparsity) {
return new ProgramEvaluator<ScratchEvaluatePreparer,
DynamicCompressedRowJacobianWriter,
DynamicCompressedRowJacobianFinalizer>(
options, program);
} else {
return new ProgramEvaluator<ScratchEvaluatePreparer,
CompressedRowJacobianWriter>(options,
program);
}
default:
*error = "Invalid Linear Solver Type. Unable to create evaluator.";
return NULL;

View File

@@ -61,11 +61,13 @@ class Evaluator {
Options()
: num_threads(1),
num_eliminate_blocks(-1),
linear_solver_type(DENSE_QR) {}
linear_solver_type(DENSE_QR),
dynamic_sparsity(false) {}
int num_threads;
int num_eliminate_blocks;
LinearSolverType linear_solver_type;
bool dynamic_sparsity;
};
static Evaluator* Create(const Options& options,

View File

@@ -59,6 +59,8 @@ SPECIALIZATIONS = [(2, 2, 2),
(2, 3, "Eigen::Dynamic"),
(2, 4, 3),
(2, 4, 4),
(2, 4, 8),
(2, 4, 9),
(2, 4, "Eigen::Dynamic"),
(2, "Eigen::Dynamic", "Eigen::Dynamic"),
(4, 4, 2),
@@ -124,6 +126,9 @@ template class SchurEliminator<%s, %s, %s>;
"""
SPECIALIZATION_FILE = """
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -57,6 +57,8 @@ SPECIALIZATIONS = [(2, 2, 2),
(2, 3, "Eigen::Dynamic"),
(2, 4, 3),
(2, 4, 4),
(2, 4, 8),
(2, 4, 9),
(2, 4, "Eigen::Dynamic"),
(2, "Eigen::Dynamic", "Eigen::Dynamic"),
(4, 4, 2),
@@ -103,7 +105,7 @@ HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
"""
@@ -122,6 +124,9 @@ template class PartitionedMatrixView<%s, %s, %s>;
"""
SPECIALIZATION_FILE = """
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -0,0 +1,59 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of PartitionedMatrixView.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<2, 4, 8>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION

View File

@@ -0,0 +1,59 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of PartitionedMatrixView.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<2, 4, 9>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"

View File

@@ -37,7 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -0,0 +1,59 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of SchurEliminator.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class SchurEliminator<2, 4, 8>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION

View File

@@ -0,0 +1,59 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of SchurEliminator.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class SchurEliminator<2, 4, 9>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -40,6 +40,9 @@
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"

View File

@@ -37,7 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.

View File

@@ -77,7 +77,6 @@ struct UnsignedInteger {
#undef CERES_INTSIZE
typedef Integer< 8>::type int8;
typedef Integer<16>::type int16;
typedef Integer<32>::type int32;
typedef Integer<64>::type int64;

View File

@@ -28,7 +28,6 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include <iomanip>
#include <iostream> // NOLINT
@@ -67,15 +66,17 @@ FunctionSample ValueAndGradientSample(const double x,
return sample;
};
} // namespace
std::ostream& operator<<(std::ostream &os, const FunctionSample& sample);
// Convenience stream operator for pushing FunctionSamples into log messages.
std::ostream& operator<<(std::ostream &os,
const FunctionSample& sample) {
std::ostream& operator<<(std::ostream &os, const FunctionSample& sample) {
os << sample.ToDebugString();
return os;
}
} // namespace
LineSearch::LineSearch(const LineSearch::Options& options)
: options_(options) {}
@@ -274,7 +275,7 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
"satisfying the sufficient decrease condition within "
"specified max_num_iterations: %d.",
options().max_num_iterations);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return;
}
@@ -292,7 +293,7 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
StringPrintf("Line search failed: step_size too small: %.5e "
"with descent_direction_max_norm: %.5e.", step_size,
descent_direction_max_norm);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return;
}
@@ -545,15 +546,16 @@ bool WolfeLineSearch::BracketingPhase(
// conditions, or a valid bracket containing such a point. Stop searching
// and set bracket_low to the step size amongst all those tested which
// minimizes f() and satisfies the Armijo condition.
LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank "
<< "bracket width: " << fabs(current.x - previous.x)
<< ", to < tolerance: " << options().min_step_size
<< ", with descent_direction_max_norm: "
<< descent_direction_max_norm << ", and failed to find "
<< "a point satisfying the strong Wolfe conditions or a "
<< "bracketing containing such a point. Accepting "
<< "point found satisfying Armijo condition only, to "
<< "allow continuation.";
LOG_IF(WARNING, !options().is_silent)
<< "Line search failed: Wolfe bracketing phase shrank "
<< "bracket width: " << fabs(current.x - previous.x)
<< ", to < tolerance: " << options().min_step_size
<< ", with descent_direction_max_norm: "
<< descent_direction_max_norm << ", and failed to find "
<< "a point satisfying the strong Wolfe conditions or a "
<< "bracketing containing such a point. Accepting "
<< "point found satisfying Armijo condition only, to "
<< "allow continuation.";
*bracket_low = current;
break;
@@ -566,7 +568,7 @@ bool WolfeLineSearch::BracketingPhase(
"find a point satisfying strong Wolfe conditions, or a "
"bracket containing such a point within specified "
"max_num_iterations: %d", options().max_num_iterations);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
// Ensure that bracket_low is always set to the step size amongst all
// those tested which minimizes f() and satisfies the Armijo condition
// when we terminate due to the 'artificial' max_num_iterations condition.
@@ -605,7 +607,7 @@ bool WolfeLineSearch::BracketingPhase(
StringPrintf("Line search failed: step_size too small: %.5e "
"with descent_direction_max_norm: %.5e", step_size,
descent_direction_max_norm);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
@@ -689,7 +691,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
initial_position.ToDebugString().c_str(),
bracket_low.ToDebugString().c_str(),
bracket_high.ToDebugString().c_str());
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
solution->value_is_valid = false;
return false;
}
@@ -710,7 +712,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"within specified max_num_iterations: %d, "
"(num iterations taken for bracketing: %d).",
options().max_num_iterations, num_bracketing_iterations);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
@@ -722,7 +724,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"too small with descent_direction_max_norm: %.5e.",
fabs(bracket_high.x - bracket_low.x),
descent_direction_max_norm);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
@@ -773,7 +775,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"between low_step: %.5e and high_step: %.5e "
"at which function is valid.",
solution->x, bracket_low.x, bracket_high.x);
LOG(WARNING) << summary->error;
LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
@@ -817,5 +819,3 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER

View File

@@ -33,8 +33,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_H_
#define CERES_INTERNAL_LINE_SEARCH_H_
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include <string>
#include <vector>
#include "ceres/internal/eigen.h"
@@ -71,6 +69,7 @@ class LineSearch {
max_num_iterations(20),
sufficient_curvature_decrease(0.9),
max_step_expansion(10.0),
is_silent(false),
function(NULL) {}
// Degree of the polynomial used to approximate the objective
@@ -144,6 +143,8 @@ class LineSearch {
// By definition for expansion, max_step_expansion > 1.0.
double max_step_expansion;
bool is_silent;
// The one dimensional function that the line search algorithm
// minimizes.
Function* function;
@@ -295,5 +296,4 @@ class WolfeLineSearch : public LineSearch {
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_H_

View File

@@ -28,8 +28,6 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include "ceres/line_search_direction.h"
#include "ceres/line_search_minimizer.h"
#include "ceres/low_rank_inverse_hessian.h"
@@ -372,5 +370,3 @@ LineSearchDirection::Create(const LineSearchDirection::Options& options) {
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER

View File

@@ -31,8 +31,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
#define CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include "ceres/internal/eigen.h"
#include "ceres/line_search_minimizer.h"
#include "ceres/types.h"
@@ -71,5 +69,4 @@ class LineSearchDirection {
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_

View File

@@ -38,8 +38,6 @@
// For details on the theory and implementation see "Numerical
// Optimization" by Nocedal & Wright.
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include "ceres/line_search_minimizer.h"
#include <algorithm>
@@ -64,28 +62,36 @@
namespace ceres {
namespace internal {
namespace {
// Small constant for various floating point issues.
// TODO(sameeragarwal): Change to a better name if this has only one
// use.
const double kEpsilon = 1e-12;
// TODO(sameeragarwal): I think there is a small bug here, in that if
// the evaluation fails, then the state can contain garbage. Look at
// this more carefully.
bool Evaluate(Evaluator* evaluator,
const Vector& x,
LineSearchMinimizer::State* state) {
const bool status = evaluator->Evaluate(x.data(),
&(state->cost),
NULL,
state->gradient.data(),
NULL);
if (status) {
state->gradient_squared_norm = state->gradient.squaredNorm();
state->gradient_max_norm = state->gradient.lpNorm<Eigen::Infinity>();
LineSearchMinimizer::State* state,
string* message) {
if (!evaluator->Evaluate(x.data(),
&(state->cost),
NULL,
state->gradient.data(),
NULL)) {
*message = "Gradient evaluation failed.";
return false;
}
return status;
Vector negative_gradient = -state->gradient;
Vector projected_gradient_step(x.size());
if (!evaluator->Plus(x.data(),
negative_gradient.data(),
projected_gradient_step.data())) {
*message = "projected_gradient_step = Plus(x, -gradient) failed.";
return false;
}
state->gradient_squared_norm = (x - projected_gradient_step).squaredNorm();
state->gradient_max_norm =
(x - projected_gradient_step).lpNorm<Eigen::Infinity>();
return true;
}
} // namespace
@@ -125,10 +131,11 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_solver_time_in_seconds = 0;
// Do initial cost and Jacobian evaluation.
if (!Evaluate(evaluator, x, &current_state)) {
summary->message = "Terminating: Cost and gradient evaluation failed.";
if (!Evaluate(evaluator, x, &current_state, &summary->message)) {
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
summary->message = "Initial cost and jacobian evaluation failed. "
"More details: " + summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -138,22 +145,13 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
// The initial gradient max_norm is bounded from below so that we do
// not divide by zero.
const double initial_gradient_max_norm =
max(iteration_summary.gradient_max_norm, kEpsilon);
const double absolute_gradient_tolerance =
options.gradient_tolerance * initial_gradient_max_norm;
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
summary->message =
StringPrintf("Terminating: Gradient tolerance reached. "
"Relative gradient max norm: %e <= %e",
iteration_summary.gradient_max_norm /
initial_gradient_max_norm,
options.gradient_tolerance);
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
summary->message = StringPrintf("Gradient tolerance reached. "
"Gradient max norm: %e <= %e",
iteration_summary.gradient_max_norm,
options.gradient_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -201,7 +199,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
&summary->message));
if (line_search.get() == NULL) {
summary->termination_type = FAILURE;
LOG_IF(ERROR, is_not_silent) << summary->message;
LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -209,24 +207,24 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
int num_line_search_direction_restarts = 0;
while (true) {
if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
return;
if (!RunCallbacks(options, iteration_summary, summary)) {
break;
}
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options.max_num_iterations) {
summary->message = "Terminating: Maximum number of iterations reached.";
summary->message = "Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options.max_solver_time_in_seconds) {
summary->message = "Terminating: Maximum solver time reached.";
summary->message = "Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
@@ -252,11 +250,11 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
// have already reached our specified maximum number of restarts,
// terminate optimization.
summary->message =
StringPrintf("Terminating: Line search direction failure: specified "
StringPrintf("Line search direction failure: specified "
"max_num_line_search_direction_restarts: %d reached.",
options.max_num_line_search_direction_restarts);
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
} else if (!line_search_status) {
// Restart line search direction with gradient descent on first iteration
@@ -306,7 +304,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
initial_step_size, current_state.directional_derivative,
(current_state.cost - previous_state.cost));
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
}
@@ -322,7 +320,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
"initial_gradient: %.5e.",
initial_step_size, current_state.cost,
current_state.directional_derivative);
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
summary->termination_type = FAILURE;
break;
}
@@ -334,53 +332,31 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_solver_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
// TODO(sameeragarwal): Collect stats.
//
// TODO(sameeragarwal): This call to Plus() directly updates the parameter
// vector via the VectorRef x. This is incorrect as we check the
// gradient and cost changes to determine if the step is accepted
// later, as such we could mutate x with a step that is not
// subsequently accepted, thus it is possible that
// summary->iterations.end()->x != x at termination.
if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
LOG_IF(WARNING, is_not_silent)
<< "x_plus_delta = Plus(x, delta) failed. ";
} else if (!Evaluate(evaluator, x_plus_delta, &current_state)) {
LOG_IF(WARNING, is_not_silent) << "Step failed to evaluate. ";
summary->termination_type = FAILURE;
summary->message =
"x_plus_delta = Plus(x, delta) failed. This should not happen "
"as the step was valid when it was selected by the line search.";
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
} else if (!Evaluate(evaluator,
x_plus_delta,
&current_state,
&summary->message)) {
summary->termination_type = FAILURE;
summary->message =
"Step failed to evaluate. This should not happen as the step was "
"valid when it was selected by the line search. More details: " +
summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
} else {
x = x_plus_delta;
}
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
summary->message =
StringPrintf("Terminating: Gradient tolerance reached. "
"Relative gradient max norm: %e <= %e. ",
(iteration_summary.gradient_max_norm /
initial_gradient_max_norm),
options.gradient_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
break;
}
iteration_summary.cost_change = previous_state.cost - current_state.cost;
const double absolute_function_tolerance =
options.function_tolerance * previous_state.cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
summary->message =
StringPrintf("Terminating. Function tolerance reached. "
"|cost_change|/cost: %e <= %e",
fabs(iteration_summary.cost_change) /
previous_state.cost,
options.function_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
return;
}
iteration_summary.cost = current_state.cost + summary->fixed_cost;
iteration_summary.step_norm = delta.norm();
iteration_summary.step_is_valid = true;
@@ -401,10 +377,32 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
summary->iterations.push_back(iteration_summary);
++summary->num_successful_steps;
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
summary->message = StringPrintf("Gradient tolerance reached. "
"Gradient max norm: %e <= %e",
iteration_summary.gradient_max_norm,
options.gradient_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
const double absolute_function_tolerance =
options.function_tolerance * previous_state.cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
summary->message =
StringPrintf("Function tolerance reached. "
"|cost_change|/cost: %e <= %e",
fabs(iteration_summary.cost_change) /
previous_state.cost,
options.function_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
}
}
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER

View File

@@ -31,8 +31,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
#define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include "ceres/types.h"
@@ -76,5 +74,4 @@ class LineSearchMinimizer : public Minimizer {
} // namespace internal
} // namespace ceres
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_

View File

@@ -98,6 +98,7 @@ class LinearSolver {
dense_linear_algebra_library_type(EIGEN),
sparse_linear_algebra_library_type(SUITE_SPARSE),
use_postordering(false),
dynamic_sparsity(false),
min_num_iterations(1),
max_num_iterations(1),
num_threads(1),
@@ -115,6 +116,7 @@ class LinearSolver {
// See solver.h for information about this flag.
bool use_postordering;
bool dynamic_sparsity;
// Number of internal iterations that the solver uses. This
// parameter only makes sense for iterative solvers like CG.

View File

@@ -37,13 +37,14 @@ namespace internal {
Minimizer::~Minimizer() {}
bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks,
bool Minimizer::RunCallbacks(const Minimizer::Options& options,
const IterationSummary& iteration_summary,
Solver::Summary* summary) {
const bool is_not_silent = !options.is_silent;
CallbackReturnType status = SOLVER_CONTINUE;
int i = 0;
while (status == SOLVER_CONTINUE && i < callbacks.size()) {
status = (*callbacks[i])(iteration_summary);
while (status == SOLVER_CONTINUE && i < options.callbacks.size()) {
status = (*options.callbacks[i])(iteration_summary);
++i;
}
switch (status) {
@@ -51,11 +52,13 @@ bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks,
return true;
case SOLVER_TERMINATE_SUCCESSFULLY:
summary->termination_type = USER_SUCCESS;
VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
summary->message = "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.";
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return false;
case SOLVER_ABORT:
summary->termination_type = USER_FAILURE;
VLOG(1) << "Terminating: User callback returned USER_ABORT.";
summary->message = "User callback returned SOLVER_ABORT.";
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return false;
default:
LOG(FATAL) << "Unknown type of user callback status";

View File

@@ -114,6 +114,7 @@ class Minimizer {
callbacks = options.callbacks;
inner_iteration_minimizer = NULL;
inner_iteration_tolerance = options.inner_iteration_tolerance;
is_constrained = false;
}
int max_num_iterations;
@@ -180,9 +181,12 @@ class Minimizer {
Minimizer* inner_iteration_minimizer;
double inner_iteration_tolerance;
// Use a bounds constrained optimization algorithm.
bool is_constrained;
};
static bool RunCallbacks(const vector<IterationCallback*> callbacks,
static bool RunCallbacks(const Options& options,
const IterationSummary& iteration_summary,
Solver::Summary* summary);

View File

@@ -95,6 +95,8 @@
#ifndef CERES_INTERNAL_MUTEX_H_
#define CERES_INTERNAL_MUTEX_H_
#include "ceres/internal/port.h"
#if defined(CERES_NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)

View File

@@ -31,7 +31,9 @@
#ifndef CERES_INTERNAL_PARAMETER_BLOCK_H_
#define CERES_INTERNAL_PARAMETER_BLOCK_H_
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <string>
#include "ceres/array_utils.h"
#include "ceres/collections_port.h"
@@ -180,16 +182,59 @@ class ParameterBlock {
}
}
void SetUpperBound(int index, double upper_bound) {
CHECK_LT(index, size_);
if (upper_bounds_.get() == NULL) {
upper_bounds_.reset(new double[size_]);
std::fill(upper_bounds_.get(),
upper_bounds_.get() + size_,
std::numeric_limits<double>::max());
}
upper_bounds_[index] = upper_bound;
};
void SetLowerBound(int index, double lower_bound) {
CHECK_LT(index, size_);
if (lower_bounds_.get() == NULL) {
lower_bounds_.reset(new double[size_]);
std::fill(lower_bounds_.get(),
lower_bounds_.get() + size_,
-std::numeric_limits<double>::max());
}
lower_bounds_[index] = lower_bound;
}
// Generalization of the addition operation. This is the same as
// LocalParameterization::Plus() but uses the parameter's current state
// instead of operating on a passed in pointer.
// LocalParameterization::Plus() followed by projection onto the
// hyper cube implied by the bounds constraints.
bool Plus(const double *x, const double* delta, double* x_plus_delta) {
if (local_parameterization_ == NULL) {
if (local_parameterization_ != NULL) {
if (!local_parameterization_->Plus(x, delta, x_plus_delta)) {
return false;
}
} else {
VectorRef(x_plus_delta, size_) = ConstVectorRef(x, size_) +
ConstVectorRef(delta, size_);
return true;
}
return local_parameterization_->Plus(x, delta, x_plus_delta);
// Project onto the box constraints.
if (lower_bounds_.get() != NULL) {
for (int i = 0; i < size_; ++i) {
x_plus_delta[i] = std::max(x_plus_delta[i], lower_bounds_[i]);
}
}
if (upper_bounds_.get() != NULL) {
for (int i = 0; i < size_; ++i) {
x_plus_delta[i] = std::min(x_plus_delta[i], upper_bounds_[i]);
}
}
return true;
}
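// Worked example (illustrative, not part of this change): with size_ == 2,
// lower_bounds_ == {0.0, -DBL_MAX} and upper_bounds_ == {1.0, DBL_MAX},
// Plus(x = {0.9, 5.0}, delta = {0.3, 2.0}) first forms {1.2, 7.0}, and the
// projection above then clamps the result to x_plus_delta = {1.0, 7.0}.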
string ToString() const {
@@ -234,6 +279,22 @@ class ParameterBlock {
return residual_blocks_.get();
}
double LowerBoundForParameter(int index) const {
if (lower_bounds_.get() == NULL) {
return -std::numeric_limits<double>::max();
} else {
return lower_bounds_[index];
}
}
double UpperBoundForParameter(int index) const {
if (upper_bounds_.get() == NULL) {
return std::numeric_limits<double>::max();
} else {
return upper_bounds_[index];
}
}
private:
void Init(double* user_state,
int size,
@@ -312,6 +373,20 @@ class ParameterBlock {
// If non-null, contains the residual blocks this parameter block is in.
scoped_ptr<ResidualBlockSet> residual_blocks_;
// Upper and lower bounds for the parameter block. SetUpperBound
// and SetLowerBound lazily initialize the upper_bounds_ and
// lower_bounds_ arrays. If they are never called, then memory for
// these arrays is never allocated. Thus for problems where there
// are no bounds, or only one sided bounds we do not pay the cost of
// allocating memory for the inactive bounds constraints.
//
// Upon initialization these arrays are initialized to
// std::numeric_limits<double>::max() and
// -std::numeric_limits<double>::max() respectively which correspond
// to the parameter block being unconstrained.
scoped_array<double> upper_bounds_;
scoped_array<double> lower_bounds_;
// Necessary so ProblemImpl can clean up the parameterizations.
friend class ProblemImpl;
};

View File

@@ -37,7 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specialization.py.
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
#include "ceres/linear_solver.h"
@@ -111,6 +111,18 @@ PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
return new PartitionedMatrixView<2, 4, 4>(
matrix, options.elimination_groups[0]);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 8)) {
return new PartitionedMatrixView<2, 4, 8>(
matrix, options.elimination_groups[0]);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 9)) {
return new PartitionedMatrixView<2, 4, 9>(
matrix, options.elimination_groups[0]);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == Eigen::Dynamic)) {

View File

@@ -178,6 +178,23 @@ void Problem::SetParameterization(
problem_impl_->SetParameterization(values, local_parameterization);
}
const LocalParameterization* Problem::GetParameterization(
double* values) const {
return problem_impl_->GetParameterization(values);
}
void Problem::SetParameterLowerBound(double* values,
int index,
double lower_bound) {
problem_impl_->SetParameterLowerBound(values, index, lower_bound);
}
void Problem::SetParameterUpperBound(double* values,
int index,
double upper_bound) {
problem_impl_->SetParameterUpperBound(values, index, upper_bound);
}
bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
double* cost,
vector<double>* residuals,
@@ -214,6 +231,10 @@ int Problem::ParameterBlockLocalSize(const double* parameter_block) const {
return problem_impl_->ParameterBlockLocalSize(parameter_block);
};
bool Problem::HasParameterBlock(const double* values) const {
return problem_impl_->HasParameterBlock(values);
}
void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const {
problem_impl_->GetParameterBlocks(parameter_blocks);
}
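The new public bounds API above is what the rest of this update plumbs through to ParameterBlock. A minimal usage sketch, assuming the public ceres::Problem interface (cost_function and x are placeholders):

double x[2] = {0.5, 2.0};
ceres::Problem problem;
problem.AddResidualBlock(cost_function, NULL, x);
problem.SetParameterLowerBound(x, 0, 0.0);   // constrain x[0] >= 0
problem.SetParameterUpperBound(x, 0, 1.0);   // constrain x[0] <= 1
problem.SetParameterUpperBound(x, 1, 10.0);  // x[1] keeps no lower bound

Only the TRUST_REGION minimizer accepts such bounds; the LINE_SEARCH path rejects them, as the solver_impl.cc changes further below show.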

View File

@@ -142,7 +142,7 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
// For dynamic problems, add the list of dependent residual blocks, which is
// empty to start.
if (options_.enable_fast_parameter_block_removal) {
if (options_.enable_fast_removal) {
new_parameter_block->EnableResidualBlockDependencies();
}
parameter_block_map_[values] = new_parameter_block;
@@ -150,6 +150,26 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
return new_parameter_block;
}
void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
CHECK_NOTNULL(residual_block);
// Perform no check on the validity of residual_block, that is handled in
// the public method: RemoveResidualBlock().
// If needed, remove the parameter dependencies on this residual block.
if (options_.enable_fast_removal) {
const int num_parameter_blocks_for_residual =
residual_block->NumParameterBlocks();
for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
residual_block->parameter_blocks()[i]
->RemoveResidualBlock(residual_block);
}
ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
residual_block_set_.erase(it);
}
DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
}
// Deletes the residual block in question, assuming there are no other
// references to it inside the problem (e.g. by another parameter). Referenced
// cost and loss functions are tucked away for future deletion, since it is not
@@ -278,13 +298,18 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
program_->residual_blocks_.size());
// Add dependencies on the residual to the parameter blocks.
if (options_.enable_fast_parameter_block_removal) {
if (options_.enable_fast_removal) {
for (int i = 0; i < parameter_blocks.size(); ++i) {
parameter_block_ptrs[i]->AddResidualBlock(new_residual_block);
}
}
program_->residual_blocks_.push_back(new_residual_block);
if (options_.enable_fast_removal) {
residual_block_set_.insert(new_residual_block);
}
return new_residual_block;
}
@@ -475,30 +500,46 @@ void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
CHECK_NOTNULL(residual_block);
// If needed, remove the parameter dependencies on this residual block.
if (options_.enable_fast_parameter_block_removal) {
const int num_parameter_blocks_for_residual =
residual_block->NumParameterBlocks();
for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
residual_block->parameter_blocks()[i]
->RemoveResidualBlock(residual_block);
}
// Verify that residual_block identifies a residual in the current problem.
const string residual_not_found_message =
StringPrintf("Residual block to remove: %p not found. This usually means "
"one of three things have happened:\n"
" 1) residual_block is uninitialised and points to a random "
"area in memory.\n"
" 2) residual_block represented a residual that was added to"
" the problem, but referred to a parameter block which has "
"since been removed, which removes all residuals which "
"depend on that parameter block, and was thus removed.\n"
" 3) residual_block referred to a residual that has already "
"been removed from the problem (by the user).",
residual_block);
if (options_.enable_fast_removal) {
CHECK(residual_block_set_.find(residual_block) !=
residual_block_set_.end())
<< residual_not_found_message;
} else {
// Perform a full search over all current residuals.
CHECK(std::find(program_->residual_blocks().begin(),
program_->residual_blocks().end(),
residual_block) != program_->residual_blocks().end())
<< residual_not_found_message;
}
DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
InternalRemoveResidualBlock(residual_block);
}
void ProblemImpl::RemoveParameterBlock(double* values) {
ParameterBlock* parameter_block =
FindParameterBlockOrDie(parameter_block_map_, values);
if (options_.enable_fast_parameter_block_removal) {
if (options_.enable_fast_removal) {
// Copy the dependent residuals from the parameter block because the set of
// dependents will change after each call to RemoveResidualBlock().
vector<ResidualBlock*> residual_blocks_to_remove(
parameter_block->mutable_residual_blocks()->begin(),
parameter_block->mutable_residual_blocks()->end());
for (int i = 0; i < residual_blocks_to_remove.size(); ++i) {
RemoveResidualBlock(residual_blocks_to_remove[i]);
InternalRemoveResidualBlock(residual_blocks_to_remove[i]);
}
} else {
// Scan all the residual blocks to remove ones that depend on the parameter
@@ -510,7 +551,7 @@ void ProblemImpl::RemoveParameterBlock(double* values) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
if (residual_block->parameter_blocks()[j] == parameter_block) {
RemoveResidualBlock(residual_block);
InternalRemoveResidualBlock(residual_block);
// The parameter blocks are guaranteed unique.
break;
}
@@ -535,6 +576,26 @@ void ProblemImpl::SetParameterization(
->SetParameterization(local_parameterization);
}
const LocalParameterization* ProblemImpl::GetParameterization(
double* values) const {
return FindParameterBlockOrDie(parameter_block_map_, values)
->local_parameterization();
}
void ProblemImpl::SetParameterLowerBound(double* values,
int index,
double lower_bound) {
FindParameterBlockOrDie(parameter_block_map_, values)
->SetLowerBound(index, lower_bound);
}
void ProblemImpl::SetParameterUpperBound(double* values,
int index,
double upper_bound) {
FindParameterBlockOrDie(parameter_block_map_, values)
->SetUpperBound(index, upper_bound);
}
bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
double* cost,
vector<double>* residuals,
@@ -730,6 +791,11 @@ int ProblemImpl::ParameterBlockLocalSize(const double* parameter_block) const {
parameter_block_map_, const_cast<double*>(parameter_block))->LocalSize();
};
bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
return (parameter_block_map_.find(const_cast<double*>(parameter_block)) !=
parameter_block_map_.end());
}
void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
CHECK_NOTNULL(parameter_blocks);
parameter_blocks->resize(0);
@@ -764,7 +830,7 @@ void ProblemImpl::GetResidualBlocksForParameterBlock(
FindParameterBlockOrDie(parameter_block_map_,
const_cast<double*>(values));
if (options_.enable_fast_parameter_block_removal) {
if (options_.enable_fast_removal) {
// In this case the residual blocks that depend on the parameter block are
// stored in the parameter block already, so just copy them out.
CHECK_NOTNULL(residual_blocks)->resize(

View File

@@ -45,6 +45,7 @@
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/collections_port.h"
#include "ceres/problem.h"
#include "ceres/types.h"
@@ -63,6 +64,7 @@ class ResidualBlock;
class ProblemImpl {
public:
typedef map<double*, ParameterBlock*> ParameterMap;
typedef HashSet<ResidualBlock*> ResidualBlockSet;
ProblemImpl();
explicit ProblemImpl(const Problem::Options& options);
@@ -127,6 +129,10 @@ class ProblemImpl {
void SetParameterBlockVariable(double* values);
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
const LocalParameterization* GetParameterization(double* values) const;
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
bool Evaluate(const Problem::EvaluateOptions& options,
double* cost,
@@ -141,6 +147,9 @@ class ProblemImpl {
int ParameterBlockSize(const double* parameter_block) const;
int ParameterBlockLocalSize(const double* parameter_block) const;
bool HasParameterBlock(const double* parameter_block) const;
void GetParameterBlocks(vector<double*>* parameter_blocks) const;
void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const;
@@ -156,9 +165,15 @@ class ProblemImpl {
Program* mutable_program() { return program_.get(); }
const ParameterMap& parameter_map() const { return parameter_block_map_; }
const ResidualBlockSet& residual_block_set() const {
CHECK(options_.enable_fast_removal)
<< "Fast removal not enabled, residual_block_set is not maintained.";
return residual_block_set_;
}
private:
ParameterBlock* InternalAddParameterBlock(double* values, int size);
void InternalRemoveResidualBlock(ResidualBlock* residual_block);
bool InternalEvaluate(Program* program,
double* cost,
@@ -180,6 +195,9 @@ class ProblemImpl {
// The mapping from user pointers to parameter blocks.
map<double*, ParameterBlock*> parameter_block_map_;
// Iff enable_fast_removal is enabled, contains the current residual blocks.
ResidualBlockSet residual_block_set_;
// The actual parameter and residual blocks.
internal::scoped_ptr<internal::Program> program_;
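The set above is only maintained when the renamed fast-removal option is enabled. A usage sketch, assuming the public ceres::Problem::Options interface:

ceres::Problem::Options options;
options.enable_fast_removal = true;  // previously enable_fast_parameter_block_removal
ceres::Problem problem(options);
// RemoveResidualBlock()/RemoveParameterBlock() can now consult the maintained
// residual block set instead of scanning every residual in the program.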

View File

@@ -79,6 +79,9 @@
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
@@ -97,7 +100,13 @@
namespace ceres {
namespace internal {
template<typename EvaluatePreparer, typename JacobianWriter>
struct NullJacobianFinalizer {
void operator()(SparseMatrix* jacobian, int num_parameters) {}
};
template<typename EvaluatePreparer,
typename JacobianWriter,
typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
public:
ProgramEvaluator(const Evaluator::Options &options, Program* program)
@@ -244,9 +253,10 @@ class ProgramEvaluator : public Evaluator {
}
if (!abort) {
const int num_parameters = program_->NumEffectiveParameters();
// Sum the cost and gradient (if requested) from each thread.
(*cost) = 0.0;
int num_parameters = program_->NumEffectiveParameters();
if (gradient != NULL) {
VectorRef(gradient, num_parameters).setZero();
}
@@ -257,6 +267,15 @@ class ProgramEvaluator : public Evaluator {
VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
}
}
// Finalize the Jacobian if it is available.
// `num_parameters` is passed to the finalizer so that additional
// storage can be reserved for additional diagonal elements if
// necessary.
if (jacobian != NULL) {
JacobianFinalizer f;
f(jacobian, num_parameters);
}
}
return !abort;
}
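A hypothetical finalizer matching the NullJacobianFinalizer signature introduced above; the functor name is illustrative and not part of Ceres:

struct ReserveDiagonalJacobianFinalizer {
  void operator()(SparseMatrix* jacobian, int num_parameters) {
    // For example, reserve room for num_parameters additional diagonal
    // entries before the Jacobian is handed back to the caller.
  }
};
// Supplied as the new third template parameter of ProgramEvaluator in place
// of the default NullJacobianFinalizer.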

View File

@@ -61,24 +61,6 @@ void InvalidateEvaluation(const ResidualBlock& block,
}
}
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
namespace {
void AppendArrayToString(const int size, const double* x, string* result) {
for (int i = 0; i < size; ++i) {
if (x == NULL) {
StringAppendF(result, "Not Computed ");
} else {
if (x[i] == kImpossibleValue) {
StringAppendF(result, "Uninitialized ");
} else {
StringAppendF(result, "%12g ", x[i]);
}
}
}
}
} // namespace
string EvaluationToString(const ResidualBlock& block,
double const* const* parameters,
double* cost,

View File

@@ -330,7 +330,10 @@ SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
}
summary.termination_type =
ss_.Cholesky(cholmod_lhs, factor_, &summary.message);
ss_.Cholesky(cholmod_lhs, factor_, &summary.message);
ss_.Free(cholmod_lhs);
if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
return summary;
}
@@ -340,7 +343,6 @@ SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
cholmod_dense* cholmod_solution = ss_.Solve(factor_,
cholmod_rhs,
&summary.message);
ss_.Free(cholmod_lhs);
ss_.Free(cholmod_rhs);
if (cholmod_solution == NULL) {

View File

@@ -100,6 +100,16 @@ SchurEliminatorBase::Create(const LinearSolver::Options& options) {
(options.f_block_size == 4)) {
return new SchurEliminator<2, 4, 4>(options);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 8)) {
return new SchurEliminator<2, 4, 8>(options);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 9)) {
return new SchurEliminator<2, 4, 9>(options);
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == Eigen::Dynamic)) {

View File

@@ -45,6 +45,9 @@
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

View File

@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/single_linkage_clustering.h"

View File

@@ -31,6 +31,9 @@
#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/collections_port.h"

View File

@@ -35,6 +35,7 @@
#ifndef CERES_INTERNAL_SMALL_BLAS_H_
#define CERES_INTERNAL_SMALL_BLAS_H_
#include "ceres/internal/port.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"

View File

@@ -56,11 +56,6 @@ void StringifyOrdering(const vector<int>& ordering, string* report) {
} // namespace
Solver::Options::~Options() {
delete linear_solver_ordering;
delete inner_iteration_ordering;
}
Solver::~Solver() {}
void Solver::Solve(const Solver::Options& options,
@@ -344,8 +339,8 @@ string Solver::Summary::FullReport() const {
StringAppendF(&report, "Total %25.3f\n\n",
total_time_in_seconds);
StringAppendF(&report, "Termination: %25s\n",
TerminationTypeToString(termination_type));
StringAppendF(&report, "Termination: %25s (%s)\n",
TerminationTypeToString(termination_type), message.c_str());
return report;
};

View File

@@ -224,6 +224,28 @@ void SummarizeReducedProgram(const Program& program, Solver::Summary* summary) {
summary->num_residuals_reduced = program.NumResiduals();
}
bool ParameterBlocksAreFinite(const ProblemImpl* problem,
string* message) {
CHECK_NOTNULL(message);
const Program& program = problem->program();
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
const double* array = parameter_blocks[i]->user_state();
const int size = parameter_blocks[i]->Size();
const int invalid_index = FindInvalidValue(size, array);
if (invalid_index != size) {
*message = StringPrintf(
"ParameterBlock: %p with size %d has at least one invalid value.\n"
"First invalid value is at index: %d.\n"
"Parameter block values: ",
array, size, invalid_index);
AppendArrayToString(size, array, message);
return false;
}
}
return true;
}
bool LineSearchOptionsAreValid(const Solver::Options& options,
string* message) {
// Validate values for configuration parameters supplied by user.
@@ -301,6 +323,84 @@ bool LineSearchOptionsAreValid(const Solver::Options& options,
return true;
}
// Returns true if the program has any non-constant parameter blocks
// which have non-trivial bounds constraints.
bool IsBoundsConstrained(const Program& program) {
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
const ParameterBlock* parameter_block = parameter_blocks[i];
if (parameter_block->IsConstant()) {
continue;
}
const int size = parameter_block->Size();
for (int j = 0; j < size; ++j) {
const double lower_bound = parameter_block->LowerBoundForParameter(j);
const double upper_bound = parameter_block->UpperBoundForParameter(j);
if (lower_bound > -std::numeric_limits<double>::max() ||
upper_bound < std::numeric_limits<double>::max()) {
return true;
}
}
}
return false;
}
// Returns false, if the problem has any constant parameter blocks
// which are not feasible, or any variable parameter blocks which have
// a lower bound greater than or equal to the upper bound.
bool ParameterBlocksAreFeasible(const ProblemImpl* problem, string* message) {
CHECK_NOTNULL(message);
const Program& program = problem->program();
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
const ParameterBlock* parameter_block = parameter_blocks[i];
const double* parameters = parameter_block->user_state();
const int size = parameter_block->Size();
if (parameter_block->IsConstant()) {
// Constant parameter blocks must start in the feasible region
// to ultimately produce a feasible solution, since Ceres cannot
// change them.
for (int j = 0; j < size; ++j) {
const double lower_bound = parameter_block->LowerBoundForParameter(j);
const double upper_bound = parameter_block->UpperBoundForParameter(j);
if (parameters[j] < lower_bound || parameters[j] > upper_bound) {
*message = StringPrintf(
"ParameterBlock: %p with size %d has at least one infeasible "
"value."
"\nFirst infeasible value is at index: %d."
"\nLower bound: %e, value: %e, upper bound: %e"
"\nParameter block values: ",
parameters, size, j, lower_bound, parameters[j], upper_bound);
AppendArrayToString(size, parameters, message);
return false;
}
}
} else {
// Variable parameter blocks must have non-empty feasible
// regions, otherwise there is no way to produce a feasible
// solution.
for (int j = 0; j < size; ++j) {
const double lower_bound = parameter_block->LowerBoundForParameter(j);
const double upper_bound = parameter_block->UpperBoundForParameter(j);
if (lower_bound >= upper_bound) {
*message = StringPrintf(
"ParameterBlock: %p with size %d has at least one infeasible "
"bound."
"\nFirst infeasible bound is at index: %d."
"\nLower bound: %e, upper bound: %e"
"\nParameter block values: ",
parameters, size, j, lower_bound, upper_bound);
AppendArrayToString(size, parameters, message);
return false;
}
}
}
}
return true;
}
} // namespace
void SolverImpl::TrustRegionMinimize(
@@ -309,12 +409,18 @@ void SolverImpl::TrustRegionMinimize(
CoordinateDescentMinimizer* inner_iteration_minimizer,
Evaluator* evaluator,
LinearSolver* linear_solver,
double* parameters,
Solver::Summary* summary) {
Minimizer::Options minimizer_options(options);
minimizer_options.is_constrained = IsBoundsConstrained(*program);
// The optimizer works on contiguous parameter vectors; allocate
// some.
Vector parameters(program->NumParameters());
// Collect the discontiguous parameters into a contiguous state
// vector.
program->ParameterBlocksToStateVector(parameters.data());
// TODO(sameeragarwal): Add support for logging the configuration
// and more detailed stats.
scoped_ptr<IterationCallback> file_logging_callback;
if (!options.solver_log.empty()) {
file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
@@ -329,7 +435,7 @@ void SolverImpl::TrustRegionMinimize(
&logging_callback);
}
StateUpdatingCallback updating_callback(program, parameters);
StateUpdatingCallback updating_callback(program, parameters.data());
if (options.update_state_every_iteration) {
// This must get pushed to the front of the callbacks so that it is run
// before any of the user callbacks.
@@ -359,20 +465,34 @@ void SolverImpl::TrustRegionMinimize(
TrustRegionMinimizer minimizer;
double minimizer_start_time = WallTimeInSeconds();
minimizer.Minimize(minimizer_options, parameters, summary);
minimizer.Minimize(minimizer_options, parameters.data(), summary);
// If the user aborted mid-optimization or the optimization
// terminated because of a numerical failure, then do not update
// user state.
if (summary->termination_type != USER_FAILURE &&
summary->termination_type != FAILURE) {
program->StateVectorToParameterBlocks(parameters.data());
program->CopyParameterBlockStateToUserState();
}
summary->minimizer_time_in_seconds =
WallTimeInSeconds() - minimizer_start_time;
}
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::LineSearchMinimize(
const Solver::Options& options,
Program* program,
Evaluator* evaluator,
double* parameters,
Solver::Summary* summary) {
Minimizer::Options minimizer_options(options);
// The optimizer works on contiguous parameter vectors; allocate some.
Vector parameters(program->NumParameters());
// Collect the discontiguous parameters into a contiguous state vector.
program->ParameterBlocksToStateVector(parameters.data());
// TODO(sameeragarwal): Add support for logging the configuration
// and more detailed stats.
scoped_ptr<IterationCallback> file_logging_callback;
@@ -389,7 +509,7 @@ void SolverImpl::LineSearchMinimize(
&logging_callback);
}
StateUpdatingCallback updating_callback(program, parameters);
StateUpdatingCallback updating_callback(program, parameters.data());
if (options.update_state_every_iteration) {
// This must get pushed to the front of the callbacks so that it is run
// before any of the user callbacks.
@@ -401,11 +521,20 @@ void SolverImpl::LineSearchMinimize(
LineSearchMinimizer minimizer;
double minimizer_start_time = WallTimeInSeconds();
minimizer.Minimize(minimizer_options, parameters, summary);
minimizer.Minimize(minimizer_options, parameters.data(), summary);
// If the user aborted mid-optimization or the optimization
// terminated because of a numerical failure, then do not update
// user state.
if (summary->termination_type != USER_FAILURE &&
summary->termination_type != FAILURE) {
program->StateVectorToParameterBlocks(parameters.data());
program->CopyParameterBlockStateToUserState();
}
summary->minimizer_time_in_seconds =
WallTimeInSeconds() - minimizer_start_time;
}
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::Solve(const Solver::Options& options,
ProblemImpl* problem_impl,
@@ -419,15 +548,11 @@ void SolverImpl::Solve(const Solver::Options& options,
<< " residual blocks, "
<< problem_impl->NumResiduals()
<< " residuals.";
*CHECK_NOTNULL(summary) = Solver::Summary();
if (options.minimizer_type == TRUST_REGION) {
TrustRegionSolve(options, problem_impl, summary);
} else {
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
LineSearchSolve(options, problem_impl, summary);
#else
LOG(FATAL) << "Ceres Solver was compiled with -DLINE_SEARCH_MINIMIZER=OFF";
#endif
}
}
@@ -440,20 +565,15 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
Program* original_program = original_problem_impl->mutable_program();
ProblemImpl* problem_impl = original_problem_impl;
// Reset the summary object to its default values.
*CHECK_NOTNULL(summary) = Solver::Summary();
summary->minimizer_type = TRUST_REGION;
SummarizeGivenProgram(*original_program, summary);
SummarizeOrdering(original_options.linear_solver_ordering,
SummarizeOrdering(original_options.linear_solver_ordering.get(),
&(summary->linear_solver_ordering_given));
SummarizeOrdering(original_options.inner_iteration_ordering,
SummarizeOrdering(original_options.inner_iteration_ordering.get(),
&(summary->inner_iteration_ordering_given));
Solver::Options options(original_options);
options.linear_solver_ordering = NULL;
options.inner_iteration_ordering = NULL;
#ifndef CERES_USE_OPENMP
if (options.num_threads > 1) {
@@ -484,6 +604,16 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
return;
}
if (!ParameterBlocksAreFinite(problem_impl, &summary->message)) {
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
if (!ParameterBlocksAreFeasible(problem_impl, &summary->message)) {
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
event_logger.AddEvent("Init");
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
@@ -507,17 +637,14 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
problem_impl = gradient_checking_problem_impl.get();
}
if (original_options.linear_solver_ordering != NULL) {
if (!IsOrderingValid(original_options, problem_impl, &summary->message)) {
if (options.linear_solver_ordering.get() != NULL) {
if (!IsOrderingValid(options, problem_impl, &summary->message)) {
LOG(ERROR) << summary->message;
return;
}
event_logger.AddEvent("CheckOrdering");
options.linear_solver_ordering =
new ParameterBlockOrdering(*original_options.linear_solver_ordering);
event_logger.AddEvent("CopyOrdering");
} else {
options.linear_solver_ordering = new ParameterBlockOrdering;
options.linear_solver_ordering.reset(new ParameterBlockOrdering);
const ProblemImpl::ParameterMap& parameter_map =
problem_impl->parameter_map();
for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -528,13 +655,6 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
event_logger.AddEvent("ConstructOrdering");
}
if (original_options.inner_iteration_ordering != NULL) {
// Make a copy, as the options struct takes ownership of the
// ordering objects.
options.inner_iteration_ordering =
new ParameterBlockOrdering(*original_options.inner_iteration_ordering);
}
// Create the three objects needed to minimize: the transformed program, the
// evaluator, and the linear solver.
scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
@@ -547,7 +667,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
return;
}
SummarizeOrdering(options.linear_solver_ordering,
SummarizeOrdering(options.linear_solver_ordering.get(),
&(summary->linear_solver_ordering_used));
SummarizeReducedProgram(*reduced_program, summary);
@@ -630,14 +750,6 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
}
event_logger.AddEvent("CreateInnerIterationMinimizer");
// The optimizer works on contiguous parameter vectors; allocate some.
Vector parameters(reduced_program->NumParameters());
// Collect the discontiguous parameters into a contiguous state vector.
reduced_program->ParameterBlocksToStateVector(parameters.data());
Vector original_parameters = parameters;
double minimizer_start_time = WallTimeInSeconds();
summary->preprocessor_time_in_seconds =
minimizer_start_time - solver_start_time;
@@ -648,26 +760,12 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
inner_iteration_minimizer.get(),
evaluator.get(),
linear_solver.get(),
parameters.data(),
summary);
event_logger.AddEvent("Minimize");
SetSummaryFinalCost(summary);
// If the user aborted mid-optimization or the optimization
// terminated because of a numerical failure, then return without
// updating user state.
if (summary->termination_type == USER_FAILURE ||
summary->termination_type == FAILURE) {
return;
}
double post_process_start_time = WallTimeInSeconds();
// Push the contiguous optimized parameters back to the user's
// parameters.
reduced_program->StateVectorToParameterBlocks(parameters.data());
reduced_program->CopyParameterBlockStateToUserState();
SetSummaryFinalCost(summary);
// Ensure the program state is set to the user parameters on the way
// out.
@@ -695,7 +793,6 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
event_logger.AddEvent("PostProcess");
}
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
ProblemImpl* original_problem_impl,
Solver::Summary* summary) {
@@ -704,9 +801,6 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
Program* original_program = original_problem_impl->mutable_program();
ProblemImpl* problem_impl = original_problem_impl;
// Reset the summary object to its default values.
*CHECK_NOTNULL(summary) = Solver::Summary();
SummarizeGivenProgram(*original_program, summary);
summary->minimizer_type = LINE_SEARCH;
summary->line_search_direction_type =
@@ -723,6 +817,12 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
return;
}
if (IsBoundsConstrained(problem_impl->program())) {
summary->message = "LINE_SEARCH Minimizer does not support bounds.";
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
Solver::Options options(original_options);
// This ensures that we get a Block Jacobian Evaluator along with
@@ -730,8 +830,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
// refactored to deal with the various bits of cleanups related to
// line search.
options.linear_solver_type = CGNR;
options.linear_solver_ordering = NULL;
options.inner_iteration_ordering = NULL;
#ifndef CERES_USE_OPENMP
if (options.num_threads > 1) {
@@ -746,15 +845,18 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
summary->num_threads_given = original_options.num_threads;
summary->num_threads_used = options.num_threads;
if (original_options.linear_solver_ordering != NULL) {
if (!IsOrderingValid(original_options, problem_impl, &summary->message)) {
if (!ParameterBlocksAreFinite(problem_impl, &summary->message)) {
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
if (options.linear_solver_ordering.get() != NULL) {
if (!IsOrderingValid(options, problem_impl, &summary->message)) {
LOG(ERROR) << summary->message;
return;
}
options.linear_solver_ordering =
new ParameterBlockOrdering(*original_options.linear_solver_ordering);
} else {
options.linear_solver_ordering = new ParameterBlockOrdering;
options.linear_solver_ordering.reset(new ParameterBlockOrdering);
const ProblemImpl::ParameterMap& parameter_map =
problem_impl->parameter_map();
for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -764,6 +866,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
}
}
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
// If the user requests gradient checking, construct a new
@@ -825,39 +928,15 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
return;
}
// The optimizer works on contiguous parameter vectors; allocate some.
Vector parameters(reduced_program->NumParameters());
// Collect the discontiguous parameters into a contiguous state vector.
reduced_program->ParameterBlocksToStateVector(parameters.data());
Vector original_parameters = parameters;
const double minimizer_start_time = WallTimeInSeconds();
summary->preprocessor_time_in_seconds =
minimizer_start_time - solver_start_time;
// Run the optimization.
LineSearchMinimize(options,
reduced_program.get(),
evaluator.get(),
parameters.data(),
summary);
// If the user aborted mid-optimization or the optimization
// terminated because of a numerical failure, then return without
// updating user state.
if (summary->termination_type == USER_FAILURE ||
summary->termination_type == FAILURE) {
return;
}
LineSearchMinimize(options, reduced_program.get(), evaluator.get(), summary);
const double post_process_start_time = WallTimeInSeconds();
// Push the contiguous optimized parameters back to the user's parameters.
reduced_program->StateVectorToParameterBlocks(parameters.data());
reduced_program->CopyParameterBlockStateToUserState();
SetSummaryFinalCost(summary);
// Ensure the program state is set to the user parameters on the way out.
@@ -876,7 +955,6 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
summary->postprocessor_time_in_seconds =
WallTimeInSeconds() - post_process_start_time;
}
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
bool SolverImpl::IsOrderingValid(const Solver::Options& options,
const ProblemImpl* problem_impl,
@@ -1045,16 +1123,16 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
ProblemImpl* problem_impl,
double* fixed_cost,
string* error) {
CHECK_NOTNULL(options->linear_solver_ordering);
CHECK_NOTNULL(options->linear_solver_ordering.get());
Program* original_program = problem_impl->mutable_program();
scoped_ptr<Program> transformed_program(new Program(*original_program));
ParameterBlockOrdering* linear_solver_ordering =
options->linear_solver_ordering;
options->linear_solver_ordering.get();
const int min_group_id =
linear_solver_ordering->group_to_elements().begin()->first;
ParameterBlockOrdering* inner_iteration_ordering =
options->inner_iteration_ordering;
options->inner_iteration_ordering.get();
if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
linear_solver_ordering,
inner_iteration_ordering,
@@ -1108,7 +1186,8 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
return transformed_program.release();
}
if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
!options->dynamic_sparsity) {
if (!ReorderProgramForSparseNormalCholesky(
options->sparse_linear_algebra_library_type,
linear_solver_ordering,
@@ -1127,7 +1206,7 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
string* error) {
CHECK_NOTNULL(options);
CHECK_NOTNULL(options->linear_solver_ordering);
CHECK_NOTNULL(options->linear_solver_ordering.get());
CHECK_NOTNULL(error);
if (options->trust_region_strategy_type == DOGLEG) {
@@ -1230,6 +1309,7 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
linear_solver_options.dense_linear_algebra_library_type =
options->dense_linear_algebra_library_type;
linear_solver_options.use_postordering = options->use_postordering;
linear_solver_options.dynamic_sparsity = options->dynamic_sparsity;
// Ignore user's postordering preferences and force it to be true if
// cholmod_camd is not available. This ensures that the linear
@@ -1382,6 +1462,7 @@ Evaluator* SolverImpl::CreateEvaluator(
->second.size())
: 0;
evaluator_options.num_threads = options.num_threads;
evaluator_options.dynamic_sparsity = options.dynamic_sparsity;
return Evaluator::Create(evaluator_options, program, error);
}
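dynamic_sparsity is a new Solver::Options flag that, as the hunks above show, is threaded through to both the evaluator and the linear solver; it tells SPARSE_NORMAL_CHOLESKY to redo the symbolic analysis at every iteration instead of caching it, which is intended for problems whose Jacobian sparsity pattern changes between iterations. A usage sketch, assuming the option name exactly as it appears in this diff:

    ceres::Solver::Options options;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    // Re-analyze the sparsity pattern at every iteration; only worthwhile
    // when the structure of the Jacobian itself changes between iterations.
    options.dynamic_sparsity = true;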
@@ -1397,7 +1478,7 @@ CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer(
scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering;
ParameterBlockOrdering* ordering_ptr = NULL;
if (options.inner_iteration_ordering == NULL) {
if (options.inner_iteration_ordering.get() == NULL) {
// Find a recursive decomposition of the Hessian matrix as a set
// of independent sets of decreasing size and invert it. This
// seems to work better in practice, i.e., Cameras before
@@ -1424,7 +1505,7 @@ CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer(
return NULL;
}
}
ordering_ptr = options.inner_iteration_ordering;
ordering_ptr = options.inner_iteration_ordering.get();
}
if (!inner_iteration_minimizer->Init(program,

View File

@@ -67,10 +67,8 @@ class SolverImpl {
CoordinateDescentMinimizer* inner_iteration_minimizer,
Evaluator* evaluator,
LinearSolver* linear_solver,
double* parameters,
Solver::Summary* summary);
#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
static void LineSearchSolve(const Solver::Options& options,
ProblemImpl* problem_impl,
Solver::Summary* summary);
@@ -79,9 +77,7 @@ class SolverImpl {
static void LineSearchMinimize(const Solver::Options &options,
Program* program,
Evaluator* evaluator,
double* parameters,
Solver::Summary* summary);
#endif // CERES_NO_LINE_SEARCH_MINIMIZER
// Create the transformed Program, which has all the fixed blocks
// and residuals eliminated, and in the case of automatic schur

View File

@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
#include "ceres/sparse_normal_cholesky_solver.h"
@@ -56,13 +59,13 @@ SparseNormalCholeskySolver::SparseNormalCholeskySolver(
options_(options) {
}
SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
void SparseNormalCholeskySolver::FreeFactorization() {
#ifndef CERES_NO_SUITESPARSE
if (factor_ != NULL) {
ss_.Free(factor_);
factor_ = NULL;
}
#endif
#endif // CERES_NO_SUITESPARSE
#ifndef CERES_NO_CXSPARSE
if (cxsparse_factor_ != NULL) {
@@ -72,6 +75,10 @@ SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
#endif // CERES_NO_CXSPARSE
}
SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
FreeFactorization();
}
LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
CompressedRowSparseMatrix* A,
const double* b,
@@ -150,13 +157,20 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
event_logger.AddEvent("Setup");
// Compute symbolic factorization if not available.
if (options_.dynamic_sparsity) {
FreeFactorization();
}
if (cxsparse_factor_ == NULL) {
if (options_.use_postordering) {
cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(AtA,
A->col_blocks(),
A->col_blocks());
} else {
cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA);
if (options_.dynamic_sparsity) {
cxsparse_factor_ = cxsparse_.AnalyzeCholesky(AtA);
} else {
cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA);
}
}
}
event_logger.AddEvent("Analysis");
@@ -169,6 +183,7 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
summary.termination_type = LINEAR_SOLVER_FAILURE;
}
event_logger.AddEvent("Solve");
return summary;
}
#else
@@ -198,6 +213,9 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
event_logger.AddEvent("Setup");
if (options_.dynamic_sparsity) {
FreeFactorization();
}
if (factor_ == NULL) {
if (options_.use_postordering) {
factor_ = ss_.BlockAnalyzeCholesky(&lhs,
@@ -205,7 +223,11 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
A->row_blocks(),
&summary.message);
} else {
factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message);
if (options_.dynamic_sparsity) {
factor_ = ss_.AnalyzeCholesky(&lhs, &summary.message);
} else {
factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message);
}
}
}
event_logger.AddEvent("Analysis");
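Both the CXSparse and SuiteSparse paths now follow the same pattern: when dynamic_sparsity is enabled the cached symbolic factorization is freed up front so the analysis step runs again with the current pattern, and a fill reducing ordering is computed each time instead of the natural ordering shortcut. A minimal cache-invalidation sketch of that control flow; these are illustrative types, not the upstream classes:

    #include <memory>

    struct SymbolicFactorization { /* ordering, elimination tree, ... */ };

    class CachedSparseCholesky {
     public:
      explicit CachedSparseCholesky(bool dynamic_sparsity)
          : dynamic_sparsity_(dynamic_sparsity) {}

      void Solve(/* matrix, right hand side, solution ... */) {
        if (dynamic_sparsity_) {
          factor_.reset();  // the pattern may have changed; drop the cache
        }
        if (!factor_) {
          factor_.reset(new SymbolicFactorization);  // redo the analysis
        }
        // ... numeric factorization and back substitution ...
      }

     private:
      const bool dynamic_sparsity_;
      std::unique_ptr<SymbolicFactorization> factor_;
    };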

View File

@@ -34,6 +34,9 @@
#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
#include "ceres/cxsparse.h"
@@ -71,6 +74,8 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
const LinearSolver::PerSolveOptions& options,
double* rhs_and_solution);
void FreeFactorization();
SuiteSparse ss_;
// Cached factorization
cholmod_factor* factor_;

View File

@@ -43,7 +43,9 @@ namespace internal {
#ifdef _MSC_VER
enum { IS_COMPILER_MSVC = 1 };
#if _MSC_VER < 1800
#define va_copy(d, s) ((d) = (s))
#endif
#else
enum { IS_COMPILER_MSVC = 0 };
#endif
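MSVC only gained va_copy with Visual Studio 2013 (_MSC_VER 1800), so the shim above maps it to plain assignment on older compilers. A small, self-contained example of the portable pattern the shim enables; this helper is illustrative and not part of the upstream sources:

    #include <cstdarg>
    #include <cstdio>
    #include <string>
    #include <vector>

    // vsnprintf consumes its va_list, so the list is copied for the
    // measuring pass and the original is kept for the writing pass.
    std::string VFormat(const char* format, va_list ap) {
      va_list ap_copy;
      va_copy(ap_copy, ap);
      const int needed = vsnprintf(NULL, 0, format, ap_copy);
      va_end(ap_copy);
      if (needed < 0) {
        return std::string();
      }
      std::vector<char> buffer(needed + 1, '\0');
      vsnprintf(&buffer[0], buffer.size(), format, ap);
      return std::string(&buffer[0], needed);
    }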

View File

@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/suitesparse.h"

View File

@@ -33,6 +33,8 @@
#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE

View File

@@ -44,6 +44,7 @@
#include "ceres/file.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/line_search.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/sparse_matrix.h"
#include "ceres/stringprintf.h"
@@ -55,8 +56,53 @@
namespace ceres {
namespace internal {
namespace {
// Small constant for various floating point issues.
const double kEpsilon = 1e-12;
LineSearch::Summary DoLineSearch(const Minimizer::Options& options,
const Vector& x,
const Vector& gradient,
const double cost,
const Vector& delta,
Evaluator* evaluator) {
LineSearchFunction line_search_function(evaluator);
LineSearch::Options line_search_options;
line_search_options.is_silent = true;
line_search_options.interpolation_type =
options.line_search_interpolation_type;
line_search_options.min_step_size = options.min_line_search_step_size;
line_search_options.sufficient_decrease =
options.line_search_sufficient_function_decrease;
line_search_options.max_step_contraction =
options.max_line_search_step_contraction;
line_search_options.min_step_contraction =
options.min_line_search_step_contraction;
line_search_options.max_num_iterations =
options.max_num_line_search_step_size_iterations;
line_search_options.sufficient_curvature_decrease =
options.line_search_sufficient_curvature_decrease;
line_search_options.max_step_expansion =
options.max_line_search_step_expansion;
line_search_options.function = &line_search_function;
string message;
scoped_ptr<LineSearch>
line_search(CHECK_NOTNULL(
LineSearch::Create(ceres::ARMIJO,
line_search_options,
&message)));
LineSearch::Summary summary;
line_search_function.Init(x, delta);
// Try the trust region step.
line_search->Search(1.0, cost, gradient.dot(delta), &summary);
if (!summary.success) {
// If that was not successful, try the negative gradient as a
// search direction.
line_search_function.Init(x, -gradient);
line_search->Search(1.0, cost, -gradient.squaredNorm(), &summary);
}
return summary;
}
} // namespace
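DoLineSearch above wraps the evaluator in a LineSearchFunction and runs an ARMIJO search along the trust region step, falling back to the negative gradient direction if that fails. The Armijo rule accepts a step size a when f(x + a*d) <= f(x) + c * a * grad f(x)' * d for a small sufficient-decrease constant c. A generic backtracking sketch of that rule, illustrative only and not the upstream LineSearch class:

    #include <functional>

    // Backtracking Armijo line search: shrink the step until the
    // sufficient-decrease condition holds (or the step becomes tiny).
    double ArmijoStepSize(const std::function<double(double)>& phi,  // phi(a) = f(x + a*d)
                          double phi0,                               // f(x)
                          double dphi0,                              // grad f(x)' * d, must be < 0
                          double c = 1e-4,
                          double shrink = 0.5,
                          double min_step = 1e-12) {
      double a = 1.0;
      while (a > min_step && phi(a) > phi0 + c * a * dphi0) {
        a *= shrink;
      }
      return a;
    }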
// Compute a scaling vector that is used to improve the conditioning
@@ -81,24 +127,30 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
double start_time = WallTimeInSeconds();
double iteration_start_time = start_time;
Init(options);
const bool is_not_silent = !options.is_silent;
summary->termination_type = NO_CONVERGENCE;
summary->num_successful_steps = 0;
summary->num_unsuccessful_steps = 0;
Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);
const bool is_not_silent = !options.is_silent;
// If the problem is bounds constrained, then enable the use of a
// line search after the trust region step has been computed. This
// line search will automatically project the test point onto
// the feasible set, thereby guaranteeing the feasibility of the
// final output.
//
// TODO(sameeragarwal): Make line search available more generally.
const bool use_line_search = options.is_constrained;
summary->termination_type = NO_CONVERGENCE;
summary->num_successful_steps = 0;
summary->num_unsuccessful_steps = 0;
const int num_parameters = evaluator->NumParameters();
const int num_effective_parameters = evaluator->NumEffectiveParameters();
const int num_residuals = evaluator->NumResiduals();
VectorRef x_min(parameters, num_parameters);
Vector x = x_min;
double x_norm = x.norm();
Vector residuals(num_residuals);
Vector trust_region_step(num_effective_parameters);
Vector delta(num_effective_parameters);
@@ -106,6 +158,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
Vector gradient(num_effective_parameters);
Vector model_residuals(num_residuals);
Vector scale(num_effective_parameters);
Vector negative_gradient(num_effective_parameters);
Vector projected_gradient_step(num_parameters);
IterationSummary iteration_summary;
iteration_summary.iteration = 0;
@@ -121,6 +175,24 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.linear_solver_iterations = 0;
iteration_summary.step_solver_time_in_seconds = 0;
VectorRef x_min(parameters, num_parameters);
Vector x = x_min;
// Project onto the feasible set.
if (options.is_constrained) {
delta.setZero();
if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
summary->message =
"Unable to project initial point onto the feasible set.";
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
x_min = x_plus_delta;
x = x_plus_delta;
}
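For bounds constrained problems the evaluator's Plus operation is assumed to project its result onto the feasible box, so applying it with a zero step, as above, pulls a possibly infeasible user-supplied starting point into bounds before the first evaluation. Conceptually the projection is a per-coordinate clamp; a minimal sketch, not the upstream evaluator:

    #include <algorithm>
    #include <vector>

    // Project x onto the box [lower, upper], coordinate by coordinate.
    void ProjectOntoBounds(const std::vector<double>& lower,
                           const std::vector<double>& upper,
                           std::vector<double>* x) {
      for (size_t i = 0; i < x->size(); ++i) {
        (*x)[i] = std::min(upper[i], std::max(lower[i], (*x)[i]));
      }
    }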
double x_norm = x.norm();
// Do initial cost and Jacobian evaluation.
double cost = 0.0;
if (!evaluator->Evaluate(x.data(),
@@ -128,42 +200,45 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
residuals.data(),
gradient.data(),
jacobian)) {
summary->message = "Terminating: Residual and Jacobian evaluation failed.";
summary->message = "Residual and Jacobian evaluation failed.";
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
int num_consecutive_nonmonotonic_steps = 0;
double minimum_cost = cost;
double reference_cost = cost;
double accumulated_reference_model_cost_change = 0.0;
double candidate_cost = cost;
double accumulated_candidate_model_cost_change = 0.0;
negative_gradient = -gradient;
if (!evaluator->Plus(x.data(),
negative_gradient.data(),
projected_gradient_step.data())) {
summary->message = "Unable to compute gradient step.";
summary->termination_type = FAILURE;
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
summary->initial_cost = cost + summary->fixed_cost;
iteration_summary.cost = cost + summary->fixed_cost;
iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
iteration_summary.gradient_norm = gradient.norm();
iteration_summary.gradient_max_norm =
(x - projected_gradient_step).lpNorm<Eigen::Infinity>();
iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
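With bounds in play the plain gradient norm is no longer the right optimality measure, so convergence is now tested on the projected gradient step: the distance between x and the projection of x - grad f(x) onto the feasible set, i.e. || x - P(x - grad f(x)) ||_inf <= gradient_tolerance. For an unconstrained problem the projection is the identity and this reduces to the previous || grad f(x) ||_inf test. A small sketch of that criticality measure for box constraints, illustrative only:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // norm_inf( x - clamp(x - gradient, lower, upper) )
    double ProjectedGradientMaxNorm(const std::vector<double>& x,
                                    const std::vector<double>& gradient,
                                    const std::vector<double>& lower,
                                    const std::vector<double>& upper) {
      double max_norm = 0.0;
      for (size_t i = 0; i < x.size(); ++i) {
        const double projected =
            std::min(upper[i], std::max(lower[i], x[i] - gradient[i]));
        max_norm = std::max(max_norm, std::fabs(x[i] - projected));
      }
      return max_norm;
    }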
// The initial gradient max_norm is bounded from below so that we do
// not divide by zero.
const double initial_gradient_max_norm =
max(iteration_summary.gradient_max_norm, kEpsilon);
const double absolute_gradient_tolerance =
options_.gradient_tolerance * initial_gradient_max_norm;
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
summary->message = StringPrintf("Terminating: Gradient tolerance reached. "
"Relative gradient max norm: %e <= %e",
(iteration_summary.gradient_max_norm /
initial_gradient_max_norm),
options_.gradient_tolerance);
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
summary->message = StringPrintf("Gradient tolerance reached. "
"Gradient max norm: %e <= %e",
iteration_summary.gradient_max_norm,
options_.gradient_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
if (options_.jacobi_scaling) {
EstimateScale(*jacobian, scale.data());
jacobian->ScaleColumns(scale.data());
} else {
scale.setOnes();
}
iteration_summary.iteration_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
iteration_summary.cumulative_time_in_seconds =
@@ -171,35 +246,34 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
+ summary->preprocessor_time_in_seconds;
summary->iterations.push_back(iteration_summary);
if (options_.jacobi_scaling) {
EstimateScale(*jacobian, scale.data());
jacobian->ScaleColumns(scale.data());
} else {
scale.setOnes();
}
int num_consecutive_nonmonotonic_steps = 0;
double minimum_cost = cost;
double reference_cost = cost;
double accumulated_reference_model_cost_change = 0.0;
double candidate_cost = cost;
double accumulated_candidate_model_cost_change = 0.0;
int num_consecutive_invalid_steps = 0;
bool inner_iterations_are_enabled = options.inner_iteration_minimizer != NULL;
while (true) {
bool inner_iterations_were_useful = false;
if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
if (!RunCallbacks(options, iteration_summary, summary)) {
return;
}
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options_.max_num_iterations) {
summary->message = "Terminating: Maximum number of iterations reached.";
summary->message = "Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options_.max_solver_time_in_seconds) {
summary->message = "Terminating: Maximum solver time reached.";
summary->message = "Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -229,10 +303,10 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
summary->message =
"Terminating. Linear solver failed due to unrecoverable "
"Linear solver failed due to unrecoverable "
"non-numeric causes. Please see the error log for clues. ";
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -276,11 +350,11 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (++num_consecutive_invalid_steps >=
options_.max_num_consecutive_invalid_steps) {
summary->message = StringPrintf(
"Terminating. Number of successive invalid steps more "
"Number of successive invalid steps more "
"than Solver::Options::max_num_consecutive_invalid_steps: %d",
options_.max_num_consecutive_invalid_steps);
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -305,19 +379,36 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
// Undo the Jacobian column scaling.
delta = (trust_region_step.array() * scale.array()).matrix();
double new_cost = numeric_limits<double>::max();
if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
// Try improving the step further by using an ARMIJO line
// search.
//
// TODO(sameeragarwal): What happens to trust region sizing as
// it interacts with the line search ?
if (use_line_search) {
const LineSearch::Summary line_search_summary =
DoLineSearch(options, x, gradient, cost, delta, evaluator);
if (line_search_summary.success) {
delta *= line_search_summary.optimal_step_size;
}
}
double new_cost = std::numeric_limits<double>::max();
if (evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
if (!evaluator->Evaluate(x_plus_delta.data(),
&new_cost,
NULL,
NULL,
NULL)) {
LOG(WARNING) << "Step failed to evaluate. "
<< "Treating it as a step with infinite cost";
new_cost = numeric_limits<double>::max();
}
} else {
LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
<< "Treating it as a step with infinite cost";
} else if (!evaluator->Evaluate(x_plus_delta.data(),
&new_cost,
NULL,
NULL,
NULL)) {
LOG(WARNING) << "Step failed to evaluate. "
<< "Treating it as a step with infinite cost";
new_cost = numeric_limits<double>::max();
} else {
}
if (new_cost < std::numeric_limits<double>::max()) {
// Check if performing an inner iteration will make it better.
if (inner_iterations_are_enabled) {
++summary->num_inner_iteration_steps;
@@ -368,13 +459,13 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
(x_norm + options_.parameter_tolerance);
if (iteration_summary.step_norm <= step_size_tolerance) {
summary->message =
StringPrintf("Terminating. Parameter tolerance reached. "
"relative step_norm: %e <= %e.",
StringPrintf("Parameter tolerance reached. "
"Relative step_norm: %e <= %e.",
(iteration_summary.step_norm /
(x_norm + options_.parameter_tolerance)),
options_.parameter_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -383,12 +474,12 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
options_.function_tolerance * cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
summary->message =
StringPrintf("Terminating. Function tolerance reached. "
StringPrintf("Function tolerance reached. "
"|cost_change|/cost: %e <= %e",
fabs(iteration_summary.cost_change) / cost,
options_.function_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -475,6 +566,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (iteration_summary.step_is_successful) {
++summary->num_successful_steps;
strategy->StepAccepted(iteration_summary.relative_decrease);
x = x_plus_delta;
x_norm = x.norm();
@@ -485,25 +577,34 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
residuals.data(),
gradient.data(),
jacobian)) {
summary->message =
"Terminating: Residual and Jacobian evaluation failed.";
summary->message = "Residual and Jacobian evaluation failed.";
summary->termination_type = FAILURE;
LOG_IF(WARNING, is_not_silent) << summary->message;
LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
iteration_summary.gradient_norm = gradient.norm();
if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
negative_gradient = -gradient;
if (!evaluator->Plus(x.data(),
negative_gradient.data(),
projected_gradient_step.data())) {
summary->message =
StringPrintf("Terminating: Gradient tolerance reached. "
"Relative gradient max norm: %e <= %e",
(iteration_summary.gradient_max_norm /
initial_gradient_max_norm),
options_.gradient_tolerance);
"projected_gradient_step = Plus(x, -gradient) failed.";
summary->termination_type = FAILURE;
LOG(ERROR) << "Terminating: " << summary->message;
return;
}
iteration_summary.gradient_max_norm =
(x - projected_gradient_step).lpNorm<Eigen::Infinity>();
iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
summary->message = StringPrintf("Gradient tolerance reached. "
"Gradient max norm: %e <= %e",
iteration_summary.gradient_max_norm,
options_.gradient_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << summary->message;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}

View File

@@ -28,6 +28,9 @@
//
// Author: kushalav@google.com (Avanish Kushal)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility.h"

View File

@@ -35,6 +35,9 @@
#ifndef CERES_INTERNAL_VISIBILITY_H_
#define CERES_INTERNAL_VISIBILITY_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include <set>

View File

@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility_based_preconditioner.h"

View File

@@ -2,3 +2,4 @@
find ./include/ -type f | sed -r 's/^\.\///' | sort > files.txt
find ./internal/ -type f | sed -r 's/^\.\///' | sort >> files.txt
find ./config/ -type f | sed -r 's/^\.\///' | sort >> files.txt