Camera tracking integration
===========================
- Do not disable the track when tracking frame-by-frame and the
  tracking threshold becomes bad.
- Show the anchored image in the track preview widget.
- Do not show the search area for non-selected and disabled markers.
- Keep the anchor position constant when sliding the offset parameters
  in the panel.
- Re-enabled OpenMP for tracking (it had been disabled at some point).
- Renamed the clearing operator so the buttons are clearer.
- Updated Eigen to the most recent version (a short sketch of the main
  change follows these notes).

NOTE: I had to enable static alignment again because of crashes when
tracking on my new laptop. I'm not sure it will work fine on other
systems.
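The bulk of the vendored Eigen update below bumps Eigen from 3.0.1 to 3.0.2 (see the
Macros.h hunk) and replaces calls such as std::min(a, b) with (std::min)(a, b). A minimal
sketch of why that matters, assuming the usual windows.h behaviour; this C++ example is
illustrative and not part of the patch:

#include <algorithm>
#include <iostream>

// Simulate the function-like macros that windows.h defines when NOMINMAX is not set.
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

int main()
{
    int a = 3, b = 7;

    // int bad = std::min(a, b);  // the preprocessor would expand the macro here -> compile error
    int lo = (std::min)(a, b);    // the parentheses suppress macro expansion: a function-like
    int hi = (std::max)(a, b);    // macro only expands when its name is directly followed by '('

    std::cout << lo << " " << hi << "\n";  // prints "3 7"
    return 0;
}

With the calls parenthesized this way, Eigen no longer has to insist on NOMINMAX, which is
presumably why the old #error block in Eigen/Core is dropped in the first hunk below.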
extern/Eigen3/Eigen/Core (vendored, 7 changed lines)
@@ -175,13 +175,6 @@
|
||||
#include <new>
|
||||
#endif
|
||||
|
||||
// this needs to be done after all possible windows C header includes and before any Eigen source includes
|
||||
// (system C++ includes are supposed to be able to deal with this already):
|
||||
// windows.h defines min and max macros which would make Eigen fail to compile.
|
||||
#if defined(min) || defined(max)
|
||||
#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols.
|
||||
#endif
|
||||
|
||||
// defined in bits/termios.h
|
||||
#undef B0
|
||||
|
||||
|
||||
extern/Eigen3/Eigen/src/Cholesky/LDLT.h (vendored, 27 changed lines)
@@ -158,10 +158,19 @@ template<typename _MatrixType, int _UpLo> class LDLT
|
||||
}
|
||||
|
||||
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
|
||||
*
|
||||
* This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
|
||||
*
|
||||
* \note_about_checking_solutions
|
||||
*
|
||||
* \sa solveInPlace(), MatrixBase::ldlt()
|
||||
* More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
|
||||
* by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
|
||||
* \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
|
||||
* \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
|
||||
* least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
|
||||
* computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular.
|
||||
*
|
||||
* \sa MatrixBase::ldlt()
|
||||
*/
|
||||
template<typename Rhs>
|
||||
inline const internal::solve_retval<LDLT, Rhs>
|
||||
@@ -376,7 +385,21 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
|
||||
dec().matrixL().solveInPlace(dst);
|
||||
|
||||
// dst = D^-1 (L^-1 P b)
|
||||
dst = dec().vectorD().asDiagonal().inverse() * dst;
|
||||
// more precisely, use pseudo-inverse of D (see bug 241)
|
||||
using std::abs;
|
||||
using std::max;
|
||||
typedef typename LDLTType::MatrixType MatrixType;
|
||||
typedef typename LDLTType::Scalar Scalar;
|
||||
typedef typename LDLTType::RealScalar RealScalar;
|
||||
const Diagonal<const MatrixType> vectorD = dec().vectorD();
|
||||
RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
|
||||
RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
|
||||
for (Index i = 0; i < vectorD.size(); ++i) {
|
||||
if(abs(vectorD(i)) > tolerance)
|
||||
dst.row(i) /= vectorD(i);
|
||||
else
|
||||
dst.row(i).setZero();
|
||||
}
|
||||
|
||||
// dst = L^-T (D^-1 L^-1 P b)
|
||||
dec().matrixU().solveInPlace(dst);
|
||||
|
||||
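For context on the LDLT.h documentation change above, a minimal usage sketch of Eigen's
robust Cholesky solve (illustrative only, not part of the patch; A is assumed symmetric
positive definite):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Build a small symmetric positive definite matrix and a right-hand side.
    Eigen::Matrix3d M = Eigen::Matrix3d::Random();
    Eigen::Matrix3d A = M * M.transpose() + 3.0 * Eigen::Matrix3d::Identity();
    Eigen::Vector3d b(1.0, 2.0, 3.0);

    // Solve A x = b through the decomposition A = P^T L D L^* P described above.
    Eigen::LDLT<Eigen::Matrix3d> ldlt(A);
    Eigen::Vector3d x = ldlt.solve(b);

    std::cout << "residual: " << (A * x - b).norm() << "\n";
    return 0;
}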
extern/Eigen3/Eigen/src/Cholesky/LLT.h (vendored, 4 changed lines)
@@ -233,7 +233,7 @@ template<> struct llt_inplace<Lower>
|
||||
|
||||
Index blockSize = size/8;
|
||||
blockSize = (blockSize/16)*16;
|
||||
blockSize = std::min(std::max(blockSize,Index(8)), Index(128));
|
||||
blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));
|
||||
|
||||
for (Index k=0; k<size; k+=blockSize)
|
||||
{
|
||||
@@ -241,7 +241,7 @@ template<> struct llt_inplace<Lower>
|
||||
// A00 | - | -
|
||||
// lu = A10 | A11 | -
|
||||
// A20 | A21 | A22
|
||||
Index bs = std::min(blockSize, size-k);
|
||||
Index bs = (std::min)(blockSize, size-k);
|
||||
Index rs = size - k - bs;
|
||||
Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
|
||||
Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/BandMatrix.h (vendored, 13 changed lines)
@@ -87,7 +87,7 @@ class BandMatrixBase : public EigenBase<Derived>
|
||||
if (i<=supers())
|
||||
{
|
||||
start = supers()-i;
|
||||
len = std::min(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
|
||||
len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
|
||||
}
|
||||
else if (i>=rows()-subs())
|
||||
len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));
|
||||
@@ -96,11 +96,11 @@ class BandMatrixBase : public EigenBase<Derived>
|
||||
|
||||
/** \returns a vector expression of the main diagonal */
|
||||
inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()
|
||||
{ return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
|
||||
{ return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
|
||||
|
||||
/** \returns a vector expression of the main diagonal (const version) */
|
||||
inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const
|
||||
{ return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
|
||||
{ return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
|
||||
|
||||
template<int Index> struct DiagonalIntReturnType {
|
||||
enum {
|
||||
@@ -122,13 +122,13 @@ class BandMatrixBase : public EigenBase<Derived>
|
||||
/** \returns a vector expression of the \a N -th sub or super diagonal */
|
||||
template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
|
||||
{
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the \a N -th sub or super diagonal */
|
||||
template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
|
||||
{
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
|
||||
return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
|
||||
}
|
||||
|
||||
/** \returns a vector expression of the \a i -th sub or super diagonal */
|
||||
@@ -166,7 +166,7 @@ class BandMatrixBase : public EigenBase<Derived>
|
||||
protected:
|
||||
|
||||
inline Index diagonalLength(Index i) const
|
||||
{ return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); }
|
||||
{ return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -284,6 +284,7 @@ class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsT
|
||||
: m_coeffs(coeffs),
|
||||
m_rows(rows), m_supers(supers), m_subs(subs)
|
||||
{
|
||||
EIGEN_UNUSED_VARIABLE(cols);
|
||||
//internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
|
||||
}
|
||||
|
||||
|
||||
@@ -742,7 +742,7 @@ struct setIdentity_impl<Derived, true>
|
||||
static EIGEN_STRONG_INLINE Derived& run(Derived& m)
|
||||
{
|
||||
m.setZero();
|
||||
const Index size = std::min(m.rows(), m.cols());
|
||||
const Index size = (std::min)(m.rows(), m.cols());
|
||||
for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
|
||||
return m;
|
||||
}
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/Diagonal.h (vendored, 2 changed lines)
@@ -87,7 +87,7 @@ template<typename MatrixType, int DiagIndex> class Diagonal
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
|
||||
|
||||
inline Index rows() const
|
||||
{ return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
|
||||
{ return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
|
||||
|
||||
inline Index cols() const { return 1; }
|
||||
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/Dot.h (vendored, 8 changed lines)
@@ -116,7 +116,9 @@ MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
|
||||
|
||||
//---------- implementation of L2 norm and related functions ----------
|
||||
|
||||
/** \returns the squared \em l2 norm of *this, i.e., for vectors, the dot product of *this with itself.
|
||||
/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
|
||||
* In both cases, it consists in the sum of the square of all the matrix entries.
|
||||
* For vectors, this is also equals to the dot product of \c *this with itself.
|
||||
*
|
||||
* \sa dot(), norm()
|
||||
*/
|
||||
@@ -126,7 +128,9 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
|
||||
return internal::real((*this).cwiseAbs2().sum());
|
||||
}
|
||||
|
||||
/** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself.
|
||||
/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
|
||||
* In both cases, it consists in the square root of the sum of the square of all the matrix entries.
|
||||
* For vectors, this is also equals to the square root of the dot product of \c *this with itself.
|
||||
*
|
||||
* \sa dot(), squaredNorm()
|
||||
*/
|
||||
|
||||
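The Dot.h documentation tweak above clarifies that squaredNorm() and norm() give the
Frobenius norm for matrices and the usual Euclidean norm for vectors. A tiny illustrative
check (not part of the patch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix2d A;
    A << 1.0, 2.0,
         3.0, 4.0;

    // For matrices, squaredNorm() sums the squares of all entries (squared Frobenius norm).
    std::cout << A.squaredNorm() << "\n";  // 1 + 4 + 9 + 16 = 30
    std::cout << A.norm() << "\n";         // sqrt(30)

    // For vectors, norm() is the Euclidean length, equal to sqrt(v.dot(v)).
    Eigen::Vector2d v(3.0, 4.0);
    std::cout << v.norm() << "\n";         // 5
    return 0;
}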
extern/Eigen3/Eigen/src/Core/Functors.h (vendored, 8 changed lines)
@@ -116,7 +116,7 @@ struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
|
||||
*/
|
||||
template<typename Scalar> struct scalar_min_op {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
|
||||
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return min(a, b); }
|
||||
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
|
||||
template<typename Packet>
|
||||
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
|
||||
{ return internal::pmin(a,b); }
|
||||
@@ -139,7 +139,7 @@ struct functor_traits<scalar_min_op<Scalar> > {
|
||||
*/
|
||||
template<typename Scalar> struct scalar_max_op {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
|
||||
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return max(a, b); }
|
||||
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
|
||||
template<typename Packet>
|
||||
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
|
||||
{ return internal::pmax(a,b); }
|
||||
@@ -167,8 +167,8 @@ template<typename Scalar> struct scalar_hypot_op {
|
||||
{
|
||||
using std::max;
|
||||
using std::min;
|
||||
Scalar p = max(_x, _y);
|
||||
Scalar q = min(_x, _y);
|
||||
Scalar p = (max)(_x, _y);
|
||||
Scalar q = (min)(_x, _y);
|
||||
Scalar qp = q/p;
|
||||
return p * sqrt(Scalar(1) + qp*qp);
|
||||
}
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/Fuzzy.h (vendored, 2 changed lines)
@@ -37,7 +37,7 @@ struct isApprox_selector
|
||||
using std::min;
|
||||
const typename internal::nested<Derived,2>::type nested(x);
|
||||
const typename internal::nested<OtherDerived,2>::type otherNested(y);
|
||||
return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
|
||||
return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -134,12 +134,12 @@ pdiv(const Packet& a,
|
||||
/** \internal \returns the min of \a a and \a b (coeff-wise) */
|
||||
template<typename Packet> inline Packet
|
||||
pmin(const Packet& a,
|
||||
const Packet& b) { using std::min; return min(a, b); }
|
||||
const Packet& b) { using std::min; return (min)(a, b); }
|
||||
|
||||
/** \internal \returns the max of \a a and \a b (coeff-wise) */
|
||||
template<typename Packet> inline Packet
|
||||
pmax(const Packet& a,
|
||||
const Packet& b) { using std::max; return max(a, b); }
|
||||
const Packet& b) { using std::max; return (max)(a, b); }
|
||||
|
||||
/** \internal \returns the absolute value of \a a */
|
||||
template<typename Packet> inline Packet
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/Map.h (vendored, 6 changed lines)
@@ -34,7 +34,7 @@
|
||||
* \tparam PlainObjectType the equivalent matrix type of the mapped data
|
||||
* \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
|
||||
* The default is \c #Unaligned.
|
||||
* \tparam StrideType optionnally specifies strides. By default, Map assumes the memory layout
|
||||
* \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
|
||||
* of an ordinary, contiguous array. This can be overridden by specifying strides.
|
||||
* The type passed here must be a specialization of the Stride template, see examples below.
|
||||
*
|
||||
@@ -72,9 +72,9 @@
|
||||
* Example: \include Map_placement_new.cpp
|
||||
* Output: \verbinclude Map_placement_new.out
|
||||
*
|
||||
* This class is the return type of Matrix::Map() but can also be used directly.
|
||||
* This class is the return type of PlainObjectBase::Map() but can also be used directly.
|
||||
*
|
||||
* \sa Matrix::Map(), \ref TopicStorageOrders
|
||||
* \sa PlainObjectBase::Map(), \ref TopicStorageOrders
|
||||
*/
|
||||
|
||||
namespace internal {
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/MathFunctions.h (vendored, 8 changed lines)
@@ -378,8 +378,8 @@ struct hypot_impl
|
||||
using std::min;
|
||||
RealScalar _x = abs(x);
|
||||
RealScalar _y = abs(y);
|
||||
RealScalar p = max(_x, _y);
|
||||
RealScalar q = min(_x, _y);
|
||||
RealScalar p = (max)(_x, _y);
|
||||
RealScalar q = (min)(_x, _y);
|
||||
RealScalar qp = q/p;
|
||||
return p * sqrt(RealScalar(1) + qp*qp);
|
||||
}
|
||||
@@ -737,7 +737,7 @@ struct scalar_fuzzy_default_impl<Scalar, false, false>
|
||||
static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
|
||||
{
|
||||
using std::min;
|
||||
return abs(x - y) <= min(abs(x), abs(y)) * prec;
|
||||
return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
|
||||
}
|
||||
static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
|
||||
{
|
||||
@@ -776,7 +776,7 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
|
||||
static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
|
||||
{
|
||||
using std::min;
|
||||
return abs2(x - y) <= min(abs2(x), abs2(y)) * prec * prec;
|
||||
return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/MatrixBase.h (vendored, 2 changed lines)
@@ -111,7 +111,7 @@ template<typename Derived> class MatrixBase
|
||||
|
||||
/** \returns the size of the main diagonal, which is min(rows(),cols()).
|
||||
* \sa rows(), cols(), SizeAtCompileTime. */
|
||||
inline Index diagonalSize() const { return std::min(rows(),cols()); }
|
||||
inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
|
||||
|
||||
/** \brief The plain matrix type corresponding to this expression.
|
||||
*
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/NumTraits.h (vendored, 4 changed lines)
@@ -87,8 +87,8 @@ template<typename T> struct GenericNumTraits
|
||||
// make sure to override this for floating-point types
|
||||
return Real(0);
|
||||
}
|
||||
inline static T highest() { return std::numeric_limits<T>::max(); }
|
||||
inline static T lowest() { return IsInteger ? std::numeric_limits<T>::min() : (-std::numeric_limits<T>::max()); }
|
||||
inline static T highest() { return (std::numeric_limits<T>::max)(); }
|
||||
inline static T lowest() { return IsInteger ? (std::numeric_limits<T>::min)() : (-(std::numeric_limits<T>::max)()); }
|
||||
|
||||
#ifdef EIGEN2_SUPPORT
|
||||
enum {
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/PlainObjectBase.h (vendored, 11 changed lines)
@@ -425,9 +425,6 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
|
||||
* while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned
|
||||
* \a data pointers.
|
||||
*
|
||||
* These methods do not allow to specify strides. If you need to specify strides, you have to
|
||||
* use the Map class directly.
|
||||
*
|
||||
* \see class Map
|
||||
*/
|
||||
//@{
|
||||
@@ -647,8 +644,8 @@ struct internal::conservative_resize_like_impl
|
||||
{
|
||||
// The storage order does not allow us to use reallocation.
|
||||
typename Derived::PlainObject tmp(rows,cols);
|
||||
const Index common_rows = std::min(rows, _this.rows());
|
||||
const Index common_cols = std::min(cols, _this.cols());
|
||||
const Index common_rows = (std::min)(rows, _this.rows());
|
||||
const Index common_cols = (std::min)(cols, _this.cols());
|
||||
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
|
||||
_this.derived().swap(tmp);
|
||||
}
|
||||
@@ -681,8 +678,8 @@ struct internal::conservative_resize_like_impl
|
||||
{
|
||||
// The storage order does not allow us to use reallocation.
|
||||
typename Derived::PlainObject tmp(other);
|
||||
const Index common_rows = std::min(tmp.rows(), _this.rows());
|
||||
const Index common_cols = std::min(tmp.cols(), _this.cols());
|
||||
const Index common_rows = (std::min)(tmp.rows(), _this.rows());
|
||||
const Index common_cols = (std::min)(tmp.cols(), _this.cols());
|
||||
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
|
||||
_this.derived().swap(tmp);
|
||||
}
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/StableNorm.h (vendored, 10 changed lines)
@@ -69,7 +69,7 @@ MatrixBase<Derived>::stableNorm() const
|
||||
if (bi>0)
|
||||
internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
|
||||
for (; bi<n; bi+=blockSize)
|
||||
internal::stable_norm_kernel(this->segment(bi,min(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
|
||||
internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
|
||||
return scale * internal::sqrt(ssq);
|
||||
}
|
||||
|
||||
@@ -103,12 +103,12 @@ MatrixBase<Derived>::blueNorm() const
|
||||
// For portability, the PORT subprograms "ilmaeh" and "rlmach"
|
||||
// are used. For any specific computer, each of the assignment
|
||||
// statements can be replaced
|
||||
nbig = std::numeric_limits<Index>::max(); // largest integer
|
||||
nbig = (std::numeric_limits<Index>::max)(); // largest integer
|
||||
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
|
||||
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
|
||||
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
|
||||
iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
|
||||
rbig = std::numeric_limits<RealScalar>::max(); // largest floating-point number
|
||||
rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
|
||||
|
||||
iexp = -((1-iemin)/2);
|
||||
b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
|
||||
@@ -167,8 +167,8 @@ MatrixBase<Derived>::blueNorm() const
|
||||
}
|
||||
else
|
||||
return internal::sqrt(amed);
|
||||
asml = min(abig, amed);
|
||||
abig = max(abig, amed);
|
||||
asml = (min)(abig, amed);
|
||||
abig = (max)(abig, amed);
|
||||
if(asml <= abig*relerr)
|
||||
return abig;
|
||||
else
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/TriangularMatrix.h (vendored, 17 changed lines)
@@ -111,6 +111,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(col);
|
||||
eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
|
||||
const int mode = int(Mode) & ~SelfAdjoint;
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(mode);
|
||||
eigen_assert((mode==Upper && col>=row)
|
||||
|| (mode==Lower && col<=row)
|
||||
|| ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
|
||||
@@ -491,7 +492,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearO
|
||||
{
|
||||
for(Index j = 0; j < dst.cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, dst.rows()-1);
|
||||
Index maxi = (std::min)(j, dst.rows()-1);
|
||||
for(Index i = 0; i <= maxi; ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
if (ClearOpposite)
|
||||
@@ -511,7 +512,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearO
|
||||
{
|
||||
for(Index i = j; i < dst.rows(); ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
Index maxi = std::min(j, dst.rows());
|
||||
Index maxi = (std::min)(j, dst.rows());
|
||||
if (ClearOpposite)
|
||||
for(Index i = 0; i < maxi; ++i)
|
||||
dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
|
||||
@@ -527,7 +528,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic
|
||||
{
|
||||
for(Index j = 0; j < dst.cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, dst.rows());
|
||||
Index maxi = (std::min)(j, dst.rows());
|
||||
for(Index i = 0; i < maxi; ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
if (ClearOpposite)
|
||||
@@ -547,7 +548,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic
|
||||
{
|
||||
for(Index i = j+1; i < dst.rows(); ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
Index maxi = std::min(j, dst.rows()-1);
|
||||
Index maxi = (std::min)(j, dst.rows()-1);
|
||||
if (ClearOpposite)
|
||||
for(Index i = 0; i <= maxi; ++i)
|
||||
dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
|
||||
@@ -563,7 +564,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, Cl
|
||||
{
|
||||
for(Index j = 0; j < dst.cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, dst.rows());
|
||||
Index maxi = (std::min)(j, dst.rows());
|
||||
for(Index i = 0; i < maxi; ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
if (ClearOpposite)
|
||||
@@ -583,7 +584,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, Cl
|
||||
{
|
||||
for(Index j = 0; j < dst.cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, dst.rows());
|
||||
Index maxi = (std::min)(j, dst.rows());
|
||||
for(Index i = maxi+1; i < dst.rows(); ++i)
|
||||
dst.copyCoeff(i, j, src);
|
||||
if (ClearOpposite)
|
||||
@@ -795,7 +796,7 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
|
||||
RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
|
||||
for(Index j = 0; j < cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, rows()-1);
|
||||
Index maxi = (std::min)(j, rows()-1);
|
||||
for(Index i = 0; i <= maxi; ++i)
|
||||
{
|
||||
RealScalar absValue = internal::abs(coeff(i,j));
|
||||
@@ -827,7 +828,7 @@ bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
|
||||
RealScalar threshold = maxAbsOnLowerPart * prec;
|
||||
for(Index j = 1; j < cols(); ++j)
|
||||
{
|
||||
Index maxi = std::min(j, rows()-1);
|
||||
Index maxi = (std::min)(j, rows()-1);
|
||||
for(Index i = 0; i < maxi; ++i)
|
||||
if(internal::abs(coeff(i, j)) > threshold) return false;
|
||||
}
|
||||
|
||||
@@ -81,6 +81,7 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi
|
||||
template<typename LhsScalar, typename RhsScalar, int KcFactor>
|
||||
void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
|
||||
{
|
||||
EIGEN_UNUSED_VARIABLE(n);
|
||||
// Explanations:
|
||||
// Let's recall the product algorithms form kc x nc horizontal panels B' on the rhs and
|
||||
// mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed
|
||||
@@ -102,7 +103,6 @@ void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrd
|
||||
k = std::min<std::ptrdiff_t>(k, l1/kdiv);
|
||||
std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
|
||||
if(_m<m) m = _m & mr_mask;
|
||||
n = n;
|
||||
}
|
||||
|
||||
template<typename LhsScalar, typename RhsScalar>
|
||||
|
||||
@@ -78,7 +78,7 @@ static void run(Index rows, Index cols, Index depth,
|
||||
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
|
||||
|
||||
Index kc = blocking.kc(); // cache block size along the K direction
|
||||
Index mc = std::min(rows,blocking.mc()); // cache block size along the M direction
|
||||
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
|
||||
//Index nc = blocking.nc(); // cache block size along the N direction
|
||||
|
||||
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
|
||||
@@ -103,7 +103,7 @@ static void run(Index rows, Index cols, Index depth,
|
||||
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
|
||||
for(Index k=0; k<depth; k+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
|
||||
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
|
||||
|
||||
// In order to reduce the chance that a thread has to wait for the other,
|
||||
// let's start by packing A'.
|
||||
@@ -140,7 +140,7 @@ static void run(Index rows, Index cols, Index depth,
|
||||
// Then keep going as usual with the remaining A'
|
||||
for(Index i=mc; i<rows; i+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i+mc,rows)-i;
|
||||
const Index actual_mc = (std::min)(i+mc,rows)-i;
|
||||
|
||||
// pack A_i,k to A'
|
||||
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
|
||||
@@ -174,7 +174,7 @@ static void run(Index rows, Index cols, Index depth,
|
||||
// (==GEMM_VAR1)
|
||||
for(Index k2=0; k2<depth; k2+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(k2+kc,depth)-k2;
|
||||
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
|
||||
|
||||
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
|
||||
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
|
||||
@@ -187,7 +187,7 @@ static void run(Index rows, Index cols, Index depth,
|
||||
// (==GEPP_VAR1)
|
||||
for(Index i2=0; i2<rows; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,rows)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
|
||||
|
||||
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
|
||||
// Note that this block will be read a very high number of times, which is equal to the number of
|
||||
|
||||
@@ -96,14 +96,14 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
|
||||
|
||||
for(Index k2=0; k2<depth; k2+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(k2+kc,depth)-k2;
|
||||
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
|
||||
|
||||
// note that the actual rhs is the transpose/adjoint of mat
|
||||
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, size);
|
||||
|
||||
for(Index i2=0; i2<size; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,size)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,size)-i2;
|
||||
|
||||
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
@@ -112,7 +112,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
|
||||
// 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel
|
||||
// 3 - after the diagonal => processed with gebp or skipped
|
||||
if (UpLo==Lower)
|
||||
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha,
|
||||
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
|
||||
-1, -1, 0, 0, allocatedBlockB);
|
||||
|
||||
sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);
|
||||
@@ -120,7 +120,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
|
||||
if (UpLo==Upper)
|
||||
{
|
||||
Index j2 = i2+actual_mc;
|
||||
gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0), size-j2), alpha,
|
||||
gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
|
||||
-1, -1, 0, 0, allocatedBlockB);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,7 +134,7 @@ EIGEN_DONT_INLINE static void run(
|
||||
}
|
||||
else
|
||||
{
|
||||
skipColumns = std::min(skipColumns,cols);
|
||||
skipColumns = (std::min)(skipColumns,cols);
|
||||
// note that the skiped columns are processed later.
|
||||
}
|
||||
|
||||
@@ -386,7 +386,7 @@ EIGEN_DONT_INLINE static void run(
|
||||
}
|
||||
else
|
||||
{
|
||||
skipRows = std::min(skipRows,Index(rows));
|
||||
skipRows = (std::min)(skipRows,Index(rows));
|
||||
// note that the skiped columns are processed later.
|
||||
}
|
||||
eigen_internal_assert( alignmentPattern==NoneAligned
|
||||
|
||||
@@ -114,7 +114,7 @@ struct symm_pack_rhs
|
||||
}
|
||||
|
||||
// second part: diagonal block
|
||||
for(Index j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
|
||||
for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
|
||||
{
|
||||
// again we can split vertically in three different parts (transpose, symmetric, normal)
|
||||
// transpose
|
||||
@@ -179,7 +179,7 @@ struct symm_pack_rhs
|
||||
for(Index j2=packet_cols; j2<cols; ++j2)
|
||||
{
|
||||
// transpose
|
||||
Index half = std::min(end_k,j2);
|
||||
Index half = (std::min)(end_k,j2);
|
||||
for(Index k=k2; k<half; k++)
|
||||
{
|
||||
blockB[count] = conj(rhs(j2,k));
|
||||
@@ -261,7 +261,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
|
||||
Index nc = cols; // cache block size along the N direction
|
||||
computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
|
||||
// kc must smaller than mc
|
||||
kc = std::min(kc,mc);
|
||||
kc = (std::min)(kc,mc);
|
||||
|
||||
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
|
||||
std::size_t sizeB = sizeW + kc*cols;
|
||||
@@ -276,7 +276,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
|
||||
|
||||
for(Index k2=0; k2<size; k2+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(k2+kc,size)-k2;
|
||||
const Index actual_kc = (std::min)(k2+kc,size)-k2;
|
||||
|
||||
// we have selected one row panel of rhs and one column panel of lhs
|
||||
// pack rhs's panel into a sequential chunk of memory
|
||||
@@ -289,7 +289,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
|
||||
// 3 - the panel below the diagonal block => generic packed copy
|
||||
for(Index i2=0; i2<k2; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,k2)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,k2)-i2;
|
||||
// transposed packed copy
|
||||
pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
@@ -297,7 +297,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
|
||||
}
|
||||
// the block diagonal
|
||||
{
|
||||
const Index actual_mc = std::min(k2+kc,size)-k2;
|
||||
const Index actual_mc = (std::min)(k2+kc,size)-k2;
|
||||
// symmetric packed copy
|
||||
pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
@@ -306,7 +306,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
|
||||
|
||||
for(Index i2=k2+kc; i2<size; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,size)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,size)-i2;
|
||||
gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
|
||||
(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
@@ -352,14 +352,14 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
|
||||
|
||||
for(Index k2=0; k2<size; k2+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(k2+kc,size)-k2;
|
||||
const Index actual_kc = (std::min)(k2+kc,size)-k2;
|
||||
|
||||
pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);
|
||||
|
||||
// => GEPP
|
||||
for(Index i2=0; i2<rows; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,rows)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
|
||||
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
|
||||
|
||||
@@ -70,7 +70,7 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector(
|
||||
rhs[i] = *it;
|
||||
}
|
||||
|
||||
Index bound = std::max(Index(0),size-8) & 0xfffffffe;
|
||||
Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
|
||||
if (FirstTriangular)
|
||||
bound = size - bound;
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
|
||||
Scalar alpha)
|
||||
{
|
||||
// strip zeros
|
||||
Index diagSize = std::min(_rows,_depth);
|
||||
Index diagSize = (std::min)(_rows,_depth);
|
||||
Index rows = IsLower ? _rows : diagSize;
|
||||
Index depth = IsLower ? diagSize : _depth;
|
||||
Index cols = _cols;
|
||||
@@ -145,7 +145,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
|
||||
IsLower ? k2>0 : k2<depth;
|
||||
IsLower ? k2-=kc : k2+=kc)
|
||||
{
|
||||
Index actual_kc = std::min(IsLower ? k2 : depth-k2, kc);
|
||||
Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
|
||||
Index actual_k2 = IsLower ? k2-actual_kc : k2;
|
||||
|
||||
// align blocks with the end of the triangular part for trapezoidal lhs
|
||||
@@ -203,10 +203,10 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
|
||||
// the part below (lower case) or above (upper case) the diagonal => GEPP
|
||||
{
|
||||
Index start = IsLower ? k2 : 0;
|
||||
Index end = IsLower ? rows : std::min(actual_k2,rows);
|
||||
Index end = IsLower ? rows : (std::min)(actual_k2,rows);
|
||||
for(Index i2=start; i2<end; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(i2+mc,end)-i2;
|
||||
const Index actual_mc = (std::min)(i2+mc,end)-i2;
|
||||
gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
|
||||
(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
@@ -240,7 +240,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
|
||||
Scalar alpha)
|
||||
{
|
||||
// strip zeros
|
||||
Index diagSize = std::min(_cols,_depth);
|
||||
Index diagSize = (std::min)(_cols,_depth);
|
||||
Index rows = _rows;
|
||||
Index depth = IsLower ? _depth : diagSize;
|
||||
Index cols = IsLower ? diagSize : _cols;
|
||||
@@ -275,7 +275,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
|
||||
IsLower ? k2<depth : k2>0;
|
||||
IsLower ? k2+=kc : k2-=kc)
|
||||
{
|
||||
Index actual_kc = std::min(IsLower ? depth-k2 : k2, kc);
|
||||
Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
|
||||
Index actual_k2 = IsLower ? k2 : k2-actual_kc;
|
||||
|
||||
// align blocks with the end of the triangular part for trapezoidal rhs
|
||||
@@ -286,7 +286,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
|
||||
}
|
||||
|
||||
// remaining size
|
||||
Index rs = IsLower ? std::min(cols,actual_k2) : cols - k2;
|
||||
Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
|
||||
// size of the triangular part
|
||||
Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
|
||||
|
||||
@@ -327,7 +327,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
|
||||
|
||||
for (Index i2=0; i2<rows; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(mc,rows-i2);
|
||||
const Index actual_mc = (std::min)(mc,rows-i2);
|
||||
pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
|
||||
|
||||
// triangular kernel
|
||||
|
||||
@@ -56,7 +56,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
|
||||
|
||||
for (Index pi=0; pi<cols; pi+=PanelWidth)
|
||||
{
|
||||
Index actualPanelWidth = std::min(PanelWidth, cols-pi);
|
||||
Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
|
||||
for (Index k=0; k<actualPanelWidth; ++k)
|
||||
{
|
||||
Index i = pi + k;
|
||||
@@ -107,7 +107,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
|
||||
|
||||
for (Index pi=0; pi<cols; pi+=PanelWidth)
|
||||
{
|
||||
Index actualPanelWidth = std::min(PanelWidth, cols-pi);
|
||||
Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
|
||||
for (Index k=0; k<actualPanelWidth; ++k)
|
||||
{
|
||||
Index i = pi + k;
|
||||
|
||||
@@ -85,7 +85,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
|
||||
IsLower ? k2<size : k2>0;
|
||||
IsLower ? k2+=kc : k2-=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
|
||||
const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);
|
||||
|
||||
// We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
|
||||
// and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
|
||||
@@ -164,7 +164,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
|
||||
Index end = IsLower ? size : k2-kc;
|
||||
for(Index i2=start; i2<end; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(mc,end-i2);
|
||||
const Index actual_mc = (std::min)(mc,end-i2);
|
||||
if (actual_mc>0)
|
||||
{
|
||||
pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
|
||||
@@ -222,7 +222,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
|
||||
IsLower ? k2>0 : k2<size;
|
||||
IsLower ? k2-=kc : k2+=kc)
|
||||
{
|
||||
const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
|
||||
const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
|
||||
Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
|
||||
|
||||
Index startPanel = IsLower ? 0 : k2+actual_kc;
|
||||
@@ -251,7 +251,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
|
||||
|
||||
for(Index i2=0; i2<rows; i2+=mc)
|
||||
{
|
||||
const Index actual_mc = std::min(mc,rows-i2);
|
||||
const Index actual_mc = (std::min)(mc,rows-i2);
|
||||
|
||||
// triangular solver kernel
|
||||
{
|
||||
|
||||
@@ -60,7 +60,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
|
||||
IsLower ? pi<size : pi>0;
|
||||
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
|
||||
{
|
||||
Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
|
||||
Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
|
||||
|
||||
Index r = IsLower ? pi : size - pi; // remaining size
|
||||
if (r > 0)
|
||||
@@ -114,7 +114,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
|
||||
IsLower ? pi<size : pi>0;
|
||||
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
|
||||
{
|
||||
Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
|
||||
Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
|
||||
Index startBlock = IsLower ? pi : pi-actualPanelWidth;
|
||||
Index endBlock = IsLower ? pi + actualPanelWidth : 0;
|
||||
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/util/Macros.h (vendored, 4 changed lines)
@@ -28,7 +28,7 @@
|
||||
|
||||
#define EIGEN_WORLD_VERSION 3
|
||||
#define EIGEN_MAJOR_VERSION 0
|
||||
#define EIGEN_MINOR_VERSION 1
|
||||
#define EIGEN_MINOR_VERSION 2
|
||||
|
||||
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
|
||||
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
|
||||
@@ -399,7 +399,7 @@
|
||||
#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
|
||||
template<typename OtherDerived> \
|
||||
EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
|
||||
METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
|
||||
(METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
|
||||
{ \
|
||||
return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
|
||||
}
|
||||
|
||||
extern/Eigen3/Eigen/src/Core/util/Memory.h (vendored, 10 changed lines)
@@ -156,7 +156,7 @@ inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
|
||||
|
||||
if (ptr != 0)
|
||||
{
|
||||
std::memcpy(newptr, ptr, std::min(size,old_size));
|
||||
std::memcpy(newptr, ptr, (std::min)(size,old_size));
|
||||
aligned_free(ptr);
|
||||
}
|
||||
|
||||
@@ -663,12 +663,12 @@ public:
|
||||
|
||||
size_type max_size() const throw()
|
||||
{
|
||||
return std::numeric_limits<size_type>::max();
|
||||
return (std::numeric_limits<size_type>::max)();
|
||||
}
|
||||
|
||||
pointer allocate( size_type num, const_pointer* hint = 0 )
|
||||
pointer allocate( size_type num, const void* hint = 0 )
|
||||
{
|
||||
static_cast<void>( hint ); // suppress unused variable warning
|
||||
EIGEN_UNUSED_VARIABLE(hint);
|
||||
return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
|
||||
}
|
||||
|
||||
@@ -903,7 +903,7 @@ inline int queryTopLevelCacheSize()
|
||||
{
|
||||
int l1, l2(-1), l3(-1);
|
||||
queryCacheSizes(l1,l2,l3);
|
||||
return std::max(l2,l3);
|
||||
return (std::max)(l2,l3);
|
||||
}
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
@@ -82,13 +82,17 @@ template<typename ExpressionType> class Cwise
|
||||
const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
|
||||
operator/(const MatrixBase<OtherDerived> &other) const;
|
||||
|
||||
/** \deprecated ArrayBase::min() */
|
||||
template<typename OtherDerived>
|
||||
const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
|
||||
min(const MatrixBase<OtherDerived> &other) const;
|
||||
(min)(const MatrixBase<OtherDerived> &other) const
|
||||
{ return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); }
|
||||
|
||||
/** \deprecated ArrayBase::max() */
|
||||
template<typename OtherDerived>
|
||||
const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
|
||||
max(const MatrixBase<OtherDerived> &other) const;
|
||||
(max)(const MatrixBase<OtherDerived> &other) const
|
||||
{ return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); }
|
||||
|
||||
const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const;
|
||||
const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const;
|
||||
|
||||
@@ -96,24 +96,6 @@ inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherD
|
||||
return m_matrix.const_cast_derived() = *this / other;
|
||||
}
|
||||
|
||||
/** \deprecated ArrayBase::min() */
|
||||
template<typename ExpressionType>
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
|
||||
Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
|
||||
{
|
||||
return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived());
|
||||
}
|
||||
|
||||
/** \deprecated ArrayBase::max() */
|
||||
template<typename ExpressionType>
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
|
||||
Cwise<ExpressionType>::max(const MatrixBase<OtherDerived> &other) const
|
||||
{
|
||||
return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived());
|
||||
}
|
||||
|
||||
/***************************************************************************
|
||||
* The following functions were defined in Array
|
||||
***************************************************************************/
|
||||
|
||||
@@ -63,7 +63,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
|
||||
~AlignedBox() {}
|
||||
|
||||
/** \returns the dimension in which the box holds */
|
||||
inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
|
||||
inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : int(AmbientDimAtCompileTime); }
|
||||
|
||||
/** \returns true if the box is null, i.e, empty. */
|
||||
inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
|
||||
@@ -71,18 +71,18 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
|
||||
/** Makes \c *this a null/empty box. */
|
||||
inline void setNull()
|
||||
{
|
||||
m_min.setConstant( std::numeric_limits<Scalar>::max());
|
||||
m_max.setConstant(-std::numeric_limits<Scalar>::max());
|
||||
m_min.setConstant( (std::numeric_limits<Scalar>::max)());
|
||||
m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
|
||||
}
|
||||
|
||||
/** \returns the minimal corner */
|
||||
inline const VectorType& min() const { return m_min; }
|
||||
inline const VectorType& (min)() const { return m_min; }
|
||||
/** \returns a non const reference to the minimal corner */
|
||||
inline VectorType& min() { return m_min; }
|
||||
inline VectorType& (min)() { return m_min; }
|
||||
/** \returns the maximal corner */
|
||||
inline const VectorType& max() const { return m_max; }
|
||||
inline const VectorType& (max)() const { return m_max; }
|
||||
/** \returns a non const reference to the maximal corner */
|
||||
inline VectorType& max() { return m_max; }
|
||||
inline VectorType& (max)() { return m_max; }
|
||||
|
||||
/** \returns true if the point \a p is inside the box \c *this. */
|
||||
inline bool contains(const VectorType& p) const
|
||||
@@ -90,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
|
||||
|
||||
/** \returns true if the box \a b is entirely inside the box \c *this. */
|
||||
inline bool contains(const AlignedBox& b) const
|
||||
{ return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); }
|
||||
{ return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
|
||||
|
||||
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
|
||||
inline AlignedBox& extend(const VectorType& p)
|
||||
{ m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; }
|
||||
{ m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
|
||||
|
||||
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
|
||||
inline AlignedBox& extend(const AlignedBox& b)
|
||||
{ m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; }
|
||||
{ m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
|
||||
|
||||
/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
|
||||
inline AlignedBox& clamp(const AlignedBox& b)
|
||||
{ m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; }
|
||||
{ m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
|
||||
|
||||
/** Translate \c *this by the vector \a t and returns a reference to \c *this. */
|
||||
inline AlignedBox& translate(const VectorType& t)
|
||||
@@ -138,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
|
||||
template<typename OtherScalarType>
|
||||
inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
|
||||
{
|
||||
m_min = other.min().template cast<Scalar>();
|
||||
m_max = other.max().template cast<Scalar>();
|
||||
m_min = (other.min)().template cast<Scalar>();
|
||||
m_max = (other.max)().template cast<Scalar>();
|
||||
}
|
||||
|
||||
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
|
||||
|
||||
extern/Eigen3/Eigen/src/Eigen2Support/SVD.h (vendored, 18 changed lines)
@@ -64,9 +64,9 @@ template<typename MatrixType> class SVD
|
||||
SVD() {} // a user who relied on compiler-generated default compiler reported problems with MSVC in 2.0.7
|
||||
|
||||
SVD(const MatrixType& matrix)
|
||||
: m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())),
|
||||
: m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
|
||||
m_matV(matrix.cols(),matrix.cols()),
|
||||
m_sigma(std::min(matrix.rows(),matrix.cols()))
|
||||
m_sigma((std::min)(matrix.rows(),matrix.cols()))
|
||||
{
|
||||
compute(matrix);
|
||||
}
|
||||
@@ -108,13 +108,13 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
const int m = matrix.rows();
|
||||
const int n = matrix.cols();
|
||||
const int nu = std::min(m,n);
|
||||
const int nu = (std::min)(m,n);
|
||||
ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
|
||||
ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
|
||||
|
||||
m_matU.resize(m, nu);
|
||||
m_matU.setZero();
|
||||
m_sigma.resize(std::min(m,n));
|
||||
m_sigma.resize((std::min)(m,n));
|
||||
m_matV.resize(n,n);
|
||||
|
||||
RowVector e(n);
|
||||
@@ -126,9 +126,9 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
|
||||
|
||||
// Reduce A to bidiagonal form, storing the diagonal elements
|
||||
// in s and the super-diagonal elements in e.
|
||||
int nct = std::min(m-1,n);
|
||||
int nrt = std::max(0,std::min(n-2,m));
|
||||
for (k = 0; k < std::max(nct,nrt); ++k)
|
||||
int nct = (std::min)(m-1,n);
|
||||
int nrt = (std::max)(0,(std::min)(n-2,m));
|
||||
for (k = 0; k < (std::max)(nct,nrt); ++k)
|
||||
{
|
||||
if (k < nct)
|
||||
{
|
||||
@@ -193,7 +193,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
|
||||
|
||||
|
||||
// Set up the final bidiagonal matrix or order p.
|
||||
int p = std::min(n,m+1);
|
||||
int p = (std::min)(n,m+1);
|
||||
if (nct < n)
|
||||
m_sigma[nct] = matA(nct,nct);
|
||||
if (m < p)
|
||||
@@ -380,7 +380,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
|
||||
case 3:
|
||||
{
|
||||
// Calculate the shift.
|
||||
Scalar scale = std::max(std::max(std::max(std::max(
|
||||
Scalar scale = (std::max)((std::max)((std::max)((std::max)(
|
||||
ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
|
||||
ei_abs(m_sigma[k])),ei_abs(e[k]));
|
||||
Scalar sp = m_sigma[p-1]/scale;
|
||||
|
||||
@@ -423,7 +423,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
|
||||
JacobiRotation<ComplexScalar> rot;
|
||||
rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
|
||||
m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
|
||||
m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
|
||||
m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
|
||||
if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
|
||||
|
||||
for(Index i=il+1 ; i<iu ; i++)
|
||||
@@ -431,7 +431,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
|
||||
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
|
||||
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
|
||||
m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
|
||||
m_matT.topRows(std::min(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
|
||||
m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
|
||||
if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -435,7 +435,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
|
||||
Scalar norm = 0.0;
|
||||
for (Index j = 0; j < size; ++j)
|
||||
{
|
||||
norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
|
||||
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
|
||||
}
|
||||
|
||||
// Backsubstitute to find vectors of upper triangular form
|
||||
@@ -564,7 +564,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
|
||||
|
||||
// Overflow control
|
||||
using std::max;
|
||||
Scalar t = max(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
|
||||
Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
|
||||
if ((eps * t) * t > Scalar(1))
|
||||
m_matT.block(i, n-1, size-i, 2) /= t;
|
||||
|
||||
|
||||
@@ -290,7 +290,7 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
|
||||
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
|
||||
Scalar norm = 0.0;
|
||||
for (Index j = 0; j < size; ++j)
|
||||
norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
|
||||
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
|
||||
return norm;
|
||||
}
|
||||
|
||||
@@ -442,7 +442,7 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Inde
|
||||
|
||||
// These Householder transformations form the O(n^3) part of the algorithm
|
||||
m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
|
||||
m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
if (computeU)
|
||||
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
}
|
||||
|
||||
@@ -387,7 +387,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
||||
{
|
||||
m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
|
||||
if(computeEigenvectors)
|
||||
m_eivec.setOnes();
|
||||
m_eivec.setOnes(n,n);
|
||||
m_info = Success;
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
|
||||
extern/Eigen3/Eigen/src/Geometry/AlignedBox.h (vendored, 14 changed lines)
@@ -111,13 +111,13 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
}

/** \returns the minimal corner */
inline const VectorType& min() const { return m_min; }
inline const VectorType& (min)() const { return m_min; }
/** \returns a non const reference to the minimal corner */
inline VectorType& min() { return m_min; }
inline VectorType& (min)() { return m_min; }
/** \returns the maximal corner */
inline const VectorType& max() const { return m_max; }
inline const VectorType& (max)() const { return m_max; }
/** \returns a non const reference to the maximal corner */
inline VectorType& max() { return m_max; }
inline VectorType& (max)() { return m_max; }

/** \returns the center of the box */
inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,

@@ -196,7 +196,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=b.min().array()).all() && (b.max().array()<=m_max.array()).all(); }
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }

/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
template<typename Derived>

@@ -287,8 +287,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
template<typename OtherScalarType>
inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
{
m_min = other.min().template cast<Scalar>();
m_max = other.max().template cast<Scalar>();
m_min = (other.min)().template cast<Scalar>();
m_max = (other.max)().template cast<Scalar>();
}

/** \returns \c true if \c *this is approximately equal to \a other, within the precision
2
extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
vendored
@@ -182,7 +182,7 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
}
else
{
m_angle = Scalar(2)*acos(min(max(Scalar(-1),q.w()),Scalar(1)));
m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
m_axis = q.vec() / internal::sqrt(n2);
}
return *this;

@@ -189,7 +189,7 @@ public:
*
* \note If \a other is approximately parallel to *this, this method will return any point on *this.
*/
VectorType intersection(const Hyperplane& other)
VectorType intersection(const Hyperplane& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);

@@ -34,7 +34,7 @@
*
* A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
* direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
* the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ l \in \mathbf{R} \f$.
* the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
*
* \param _Scalar the scalar type, i.e., the type of the coefficients
* \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.

@@ -107,7 +107,7 @@ public:
{ return origin() + direction().dot(p-origin()) * direction(); }

template <int OtherOptions>
Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;

/** \returns \c *this with scalar type casted to \a NewScalarType
*

@@ -159,7 +159,7 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H
*/
template <typename _Scalar, int _AmbientDim, int _Options>
template <int OtherOptions>
inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane)
inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
{
return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
/ hyperplane.normal().dot(direction());
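The ParametrizedLine/Hyperplane hunks above only add const to intersection(); for reference, the returned value is the line parameter obtained by substituting \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$ into the plane equation \f$ \mathbf{n} \cdot \mathbf{x} + c = 0 \f$, giving \f$ t = -(c + \mathbf{n} \cdot \mathbf{o}) / (\mathbf{n} \cdot \mathbf{d}) \f$, which is exactly the expression in the body shown.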
4
extern/Eigen3/Eigen/src/LU/FullPivLU.h
vendored
@@ -533,7 +533,7 @@ template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
const Index smalldim = std::min(m_lu.rows(), m_lu.cols());
const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...

@@ -695,7 +695,7 @@ struct solve_retval<FullPivLU<_MatrixType>, Rhs>
const Index rows = dec().rows(), cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots();
eigen_assert(rhs().rows() == rows);
const Index smalldim = std::min(rows, cols);
const Index smalldim = (std::min)(rows, cols);

if(nonzero_pivots == 0)
{
8
extern/Eigen3/Eigen/src/LU/PartialPivLU.h
vendored
@@ -253,7 +253,7 @@ struct partial_lu_impl
{
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)

@@ -313,7 +313,7 @@ struct partial_lu_impl
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);

const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);

// if the matrix is too small, no blocking:
if(size<=16)

@@ -327,14 +327,14 @@ struct partial_lu_impl
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize);
blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
}

nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index trows = rows - k - bs; // trailing rows
Index tsize = size - k - bs; // trailing size
@@ -182,8 +182,8 @@ struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
};
static void run(const MatrixType& matrix, ResultType& result)
{
const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000ll, 0x0000000000000000ll };
const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000ll, 0x8000000000000000ll };
const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

// The inverse is calculated using "Divide and Conquer" technique. The
// original matrix is divide into four 2x2 sub-matrices. Since each

@@ -316,8 +316,8 @@ struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

d1 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_PN));
d2 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_NP));
d1 = _mm_xor_pd(rd, _Sign_PN);
d2 = _mm_xor_pd(rd, _Sign_NP);

// iC = B*|C| - A*C#*D;
dC = _mm_shuffle_pd(dC,dC,0);
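The rewritten sign masks above serve the same purpose as the old aligned arrays plus _mm_load_pd: XOR-ing a packed double with a constant whose only set bit is a sign bit negates that lane. A small standalone sketch of the idea (illustrative, not part of the patch):

#include <emmintrin.h>

// Negate only the upper lane of {v0, v1}: the mask below has just the sign
// bit of lane 1 set, so _mm_xor_pd flips that lane's sign and nothing else.
static inline __m128d negate_high_lane(__m128d v)
{
    const __m128d sign_hi = _mm_castsi128_pd(_mm_set_epi32(0x80000000, 0x0, 0x0, 0x0));
    return _mm_xor_pd(v, sign_hi);   // yields {v0, -v1}
}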
@@ -93,7 +93,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_colsPermutation(cols),
m_colsTranspositions(cols),
m_temp(cols),

@@ -103,7 +103,7 @@ template<typename _MatrixType> class ColPivHouseholderQR

ColPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_colsPermutation(matrix.cols()),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),

@@ -93,21 +93,21 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_rows_transpositions(rows),
m_cols_transpositions(cols),
m_cols_permutation(cols),
m_temp(std::min(rows,cols)),
m_temp((std::min)(rows,cols)),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}

FullPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(), matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions(matrix.rows()),
m_cols_transpositions(matrix.cols()),
m_cols_permutation(matrix.cols()),
m_temp(std::min(matrix.rows(), matrix.cols())),
m_temp((std::min)(matrix.rows(), matrix.cols())),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{

@@ -379,7 +379,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);

@@ -493,7 +493,7 @@ struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
// FIXME brain dead
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * std::min(rows,cols);
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
// this internal:: prefix is needed by at least gcc 3.4 and ICC
if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
return;

@@ -520,7 +520,7 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);
MatrixQType res = MatrixQType::Identity(rows, rows);
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
for (Index k = size-1; k >= 0; k--)
18
extern/Eigen3/Eigen/src/QR/HouseholderQR.h
vendored
@@ -88,13 +88,13 @@ template<typename _MatrixType> class HouseholderQR
*/
HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_temp(cols),
m_isInitialized(false) {}

HouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false)
{

@@ -210,7 +210,7 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename
typedef typename MatrixQR::RealScalar RealScalar;
Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

eigen_assert(hCoeffs.size() == size);

@@ -250,7 +250,7 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,

Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows, cols);
Index size = (std::min)(rows, cols);

typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
TempType tempVector;

@@ -260,12 +260,12 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
tempData = tempVector.data();
}

Index blockSize = std::min(maxBlockSize,size);
Index blockSize = (std::min)(maxBlockSize,size);

int k = 0;
Index k = 0;
for (k = 0; k < size; k += blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index tcols = cols - k - bs; // trailing columns
Index brows = rows-k; // rows of the block

@@ -299,7 +299,7 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
const Index rows = dec().rows(), cols = dec().cols();
const Index rank = std::min(rows, cols);
const Index rank = (std::min)(rows, cols);
eigen_assert(rhs().rows() == rows);

typename Rhs::PlainObject c(rhs());

@@ -327,7 +327,7 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);
8
extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
vendored
@@ -569,7 +569,7 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = std::min(m_rows, m_cols);
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize

@@ -619,8 +619,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever.
using std::max;
if(max(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
> max(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
> (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
{
finished = false;

@@ -689,7 +689,7 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// A = U S V^*
// So A^{-1} = V S^{-1} U^*

Index diagSize = std::min(dec().rows(), dec().cols());
Index diagSize = (std::min)(dec().rows(), dec().cols());
typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);

Index nonzeroSingVals = dec().nonzeroSingularValues();
2
extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
vendored
@@ -97,7 +97,7 @@ class AmbiVector
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size);
m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];

@@ -216,7 +216,7 @@ class CompressedStorage
{
Scalar* newValues = new Scalar[size];
Index* newIndices = new Index[size];
size_t copySize = std::min(size, m_size);
size_t copySize = (std::min)(size, m_size);
// copy
memcpy(newValues, m_values, copySize * sizeof(Scalar));
memcpy(newIndices, m_indices, copySize * sizeof(Index));

@@ -141,7 +141,7 @@ class DynamicSparseMatrix
{
if (outerSize()>0)
{
Index reserveSizePerVector = std::max(reserveSize/outerSize(),Index(4));
Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
for (Index j=0; j<outerSize(); ++j)
{
m_data[j].reserve(reserveSizePerVector);
2
extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
vendored
@@ -35,7 +35,7 @@
// const typename internal::nested<Derived,2>::type nested(derived());
// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
// return (nested - otherNested).cwise().abs2().sum()
// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// }

#endif // EIGEN_SPARSE_FUZZY_H

@@ -257,7 +257,7 @@ class SparseMatrix
// furthermore we bound the realloc ratio to:
// 1) reduce multiple minor realloc when the matrix is almost filled
// 2) avoid to allocate too much memory when the matrix is almost empty
reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
}
}
m_data.resize(m_data.size()+1,reallocRatio);

@@ -223,7 +223,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// thanks to shallow copies, we always eval to a tempary
Derived temp(other.rows(), other.cols());

temp.reserve(std::max(this->rows(),this->cols())*2);
temp.reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
temp.startVec(j);

@@ -253,7 +253,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// eval without temporary
derived().resize(other.rows(), other.cols());
derived().setZero();
derived().reserve(std::max(this->rows(),this->cols())*2);
derived().reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
derived().startVec(j);

@@ -383,7 +383,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm ? perm[i] : i;
count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest._outerIndexPtr()[0] = 0;

@@ -403,8 +403,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm? perm[i] : i;
Index k = count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
dest._innerIndexPtr()[k] = DstUpLo==Lower ? std::max(ip,jp) : std::min(ip,jp);
Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);

if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
dest._valuePtr()[k] = conj(it.value());

@@ -45,7 +45,7 @@ static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// int t200 = rows/(log2(200)*1.39);
// int t = (rows*100)/139;

@@ -131,7 +131,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)

@@ -143,7 +143,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
for (Index j=0; j<cols; ++j)
{
// let's do a more accurate determination of the nnz ratio for the current column j of res
//float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
//float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
// FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
float ratioColRes = ratioRes;
tempVector.init(ratioColRes);
18
extern/Eigen3/Eigen/src/StlSupport/StdVector.h
vendored
@@ -28,32 +28,24 @@

#include "Eigen/src/StlSupport/details.h"

// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)
#define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...) template class std::vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
#else
#define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...)
#endif

/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::vector such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(__VA_ARGS__) \
namespace std \
{ \
template<typename _Ay> \
class vector<__VA_ARGS__, _Ay> \
template<> \
class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef typename vector_base::allocator_type allocator_type; \
typedef typename vector_base::size_type size_type; \
typedef typename vector_base::iterator iterator; \
typedef vector_base::allocator_type allocator_type; \
typedef vector_base::size_type size_type; \
typedef vector_base::iterator iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
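With the specialization above now targeting std::allocator<T> directly, code that never spells out the aligned allocator still ends up on it once the macro has been invoked for the type. Rough usage sketch (assumes the usual Eigen headers; Vector4f stands in for any fixed-size vectorizable type):

#include <vector>
#include <Eigen/Core>
#include <Eigen/StdVector>

EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)

void fill(std::vector<Eigen::Vector4f>& v)
{
    // No allocator is spelled out, yet the vector resolves to the
    // specialization above and therefore to EIGEN_ALIGNED_ALLOCATOR storage.
    v.push_back(Eigen::Vector4f::Zero());
}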
2
extern/libmv/CMakeLists.txt
vendored
@@ -170,8 +170,6 @@ IF(WIN32)
string(REPLACE "" "Od" CMAKE_C_FLAGS_RELWITHDEBINFO "")
endforeach()
ENDIF(MSVC)

add_definitions(-DEIGEN_DONT_ALIGN_STATICALLY)
ELSE(WIN32)
list(APPEND SRC
third_party/glog/src/utilities.cc
2
extern/libmv/SConscript
vendored
@@ -45,8 +45,6 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):
cflags_libmv = Split(env['REL_CFLAGS'])
ccflags_libmv = Split(env['REL_CCFLAGS'])
cxxflags_libmv = Split(env['REL_CXXFLAGS'])

defs.append('EIGEN_DONT_ALIGN_STATICALLY')
else:
src += env.Glob("third_party/glog/src/*.cc")
incs += ' ./third_party/glog/src'
@@ -205,11 +205,10 @@ class CLIP_PT_tools_tracking(Panel):
row.operator("clip.track_markers", text="", icon='FRAME_NEXT')

col = layout.column(align=True)
op = col.operator("clip.clear_track_path", \
text="Clear Remained Path")
op = col.operator("clip.clear_track_path", text="Clear After")
op.action = 'REMAINED'

op = col.operator("clip.clear_track_path", text="Clear Path Up To")
op = col.operator("clip.clear_track_path", text="Clear Before")
op.action = 'UPTO'

op = col.operator("clip.clear_track_path", text="Clear Track Path")
@@ -61,11 +61,12 @@ void BKE_tracking_join_tracks(struct MovieTrackingTrack *dst_track, struct Movie
void BKE_tracking_free(struct MovieTracking *tracking);

struct ImBuf *BKE_tracking_acquire_pattern_imbuf(struct ImBuf *ibuf, struct MovieTrackingTrack *track,
struct MovieTrackingMarker *marker, int margin, float pos[2], int origin[2]);
struct MovieTrackingMarker *marker, int margin, int anchored, float pos[2], int origin[2]);
struct ImBuf *BKE_tracking_acquire_search_imbuf(struct ImBuf *ibuf, struct MovieTrackingTrack *track,
struct MovieTrackingMarker *marker, int margin, float pos[2], int origin[2]);
struct MovieTrackingMarker *marker, int margin, int anchored, float pos[2], int origin[2]);

struct MovieTrackingContext *BKE_tracking_context_new(struct MovieClip *clip, struct MovieClipUser *user, int backwards);
struct MovieTrackingContext *BKE_tracking_context_new(struct MovieClip *clip, struct MovieClipUser *user,
short backwards, short disable_failed);
void BKE_tracking_context_free(struct MovieTrackingContext *context);
void BKE_tracking_sync(struct MovieTrackingContext *context);
void BKE_tracking_sync_user(struct MovieClipUser *user, struct MovieTrackingContext *context);

@@ -712,7 +712,7 @@ void BKE_movieclip_update_scopes(MovieClip *clip, MovieClipUser *user, MovieClip
if(ibuf && ibuf->rect) {
ImBuf *tmpibuf;

tmpibuf= BKE_tracking_acquire_pattern_imbuf(ibuf, track, marker, 1, scopes->track_pos, NULL);
tmpibuf= BKE_tracking_acquire_pattern_imbuf(ibuf, track, marker, 1, 1, scopes->track_pos, NULL);

if(tmpibuf->rect_float)
IMB_rect_from_float(tmpibuf);

@@ -494,11 +494,11 @@ typedef struct MovieTrackingContext {
GHash *hash;
MovieTrackingSettings settings;

int backwards;
short backwards, disable_failed;
int sync_frame;
} MovieTrackingContext;
MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *user, int backwards)
MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *user, short backwards, short disable_failed)
{
MovieTrackingContext *context= MEM_callocN(sizeof(MovieTrackingContext), "trackingContext");
MovieTracking *tracking= &clip->tracking;

@@ -508,6 +508,7 @@ MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *u

context->settings= *settings;
context->backwards= backwards;
context->disable_failed= disable_failed;
context->hash= BLI_ghash_new(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "tracking trackHash");
context->sync_frame= user->framenr;
context->first_time= 1;

@@ -640,14 +641,19 @@ static void disable_imbuf_channels(ImBuf *ibuf, MovieTrackingTrack *track)
}

static ImBuf *acquire_area_imbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTrackingMarker *marker,
float min[2], float max[2], int margin, float pos[2], int origin[2])
float min[2], float max[2], int margin, int anchored, float pos[2], int origin[2])
{
ImBuf *tmpibuf;
int x, y;
int x1, y1, x2, y2, w, h;
float mpos[2];

x= marker->pos[0]*ibuf->x;
y= marker->pos[1]*ibuf->y;
copy_v2_v2(mpos, marker->pos);
if(anchored)
add_v2_v2(mpos, track->offset);

x= mpos[0]*ibuf->x;
y= mpos[1]*ibuf->y;
x1= x-(int)(-min[0]*ibuf->x);
y1= y-(int)(-min[1]*ibuf->y);
x2= x+(int)(max[0]*ibuf->x);

@@ -665,8 +671,8 @@ static ImBuf *acquire_area_imbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTr
IMB_rectcpy(tmpibuf, ibuf, 0, 0, x1-margin, y1-margin, w+margin*2, h+margin*2);

if(pos != NULL) {
pos[0]= x-x1+(marker->pos[0]*ibuf->x-x)+margin;
pos[1]= y-y1+(marker->pos[1]*ibuf->y-y)+margin;
pos[0]= x-x1+(mpos[0]*ibuf->x-x)+margin;
pos[1]= y-y1+(mpos[1]*ibuf->y-y)+margin;
}

if(origin != NULL) {

@@ -680,15 +686,15 @@ static ImBuf *acquire_area_imbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTr
}

ImBuf *BKE_tracking_acquire_pattern_imbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTrackingMarker *marker,
int margin, float pos[2], int origin[2])
int margin, int anchored, float pos[2], int origin[2])
{
return acquire_area_imbuf(ibuf, track, marker, track->pat_min, track->pat_max, margin, pos, origin);
return acquire_area_imbuf(ibuf, track, marker, track->pat_min, track->pat_max, margin, anchored, pos, origin);
}

ImBuf *BKE_tracking_acquire_search_imbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTrackingMarker *marker,
int margin, float pos[2], int origin[2])
int margin, int anchored, float pos[2], int origin[2])
{
return acquire_area_imbuf(ibuf, track, marker, track->search_min, track->search_max, margin, pos, origin);
return acquire_area_imbuf(ibuf, track, marker, track->search_min, track->search_max, margin, anchored, pos, origin);
}

#ifdef WITH_LIBMV
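The new anchored argument above is what lets the preview widget show the anchored image: when it is set, the sampled area is centred on the marker position shifted by the track offset rather than on the raw marker position. A compressed sketch of that logic with hypothetical reduced types (not a copy of the Blender code):

struct Vec2 { float x, y; };

static Vec2 area_center(Vec2 marker_pos, Vec2 track_offset,
                        int anchored, int ibuf_w, int ibuf_h)
{
    Vec2 p = marker_pos;            // normalized [0..1] clip coordinates
    if (anchored) {                 // mirrors the add_v2_v2(mpos, track->offset) call
        p.x += track_offset.x;
        p.y += track_offset.y;
    }
    return Vec2{ p.x * ibuf_w, p.y * ibuf_h };   // pixel-space centre of the area
}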
@@ -702,7 +708,7 @@ static float *acquire_search_floatbuf(ImBuf *ibuf, MovieTrackingTrack *track, Mo
width= (track->search_max[0]-track->search_min[0])*ibuf->x;
height= (track->search_max[1]-track->search_min[1])*ibuf->y;

tmpibuf= BKE_tracking_acquire_search_imbuf(ibuf, track, marker, 0, pos, origin);
tmpibuf= BKE_tracking_acquire_search_imbuf(ibuf, track, marker, 0, 0, pos, origin);
disable_imbuf_channels(tmpibuf, track);

*width_r= width;

@@ -742,7 +748,7 @@ static unsigned char *acquire_search_bytebuf(ImBuf *ibuf, MovieTrackingTrack *tr
width= (track->search_max[0]-track->search_min[0])*ibuf->x;
height= (track->search_max[1]-track->search_min[1])*ibuf->y;

tmpibuf= BKE_tracking_acquire_search_imbuf(ibuf, track, marker, 0, pos, origin);
tmpibuf= BKE_tracking_acquire_search_imbuf(ibuf, track, marker, 0, 0, pos, origin);
disable_imbuf_channels(tmpibuf, track);

*width_r= width;

@@ -950,7 +956,7 @@ int BKE_tracking_next(MovieTrackingContext *context)
if(!ibuf_new)
return 0;

//#pragma omp parallel for private(a) shared(ibuf_new, ok) if(context->num_tracks>1)
#pragma omp parallel for private(a) shared(ibuf_new, ok) if(context->num_tracks>1)
for(a= 0; a<context->num_tracks; a++) {
TrackContext *track_context= &context->track_context[a];
MovieTrackingTrack *track= track_context->track;

@@ -1075,7 +1081,7 @@ int BKE_tracking_next(MovieTrackingContext *context)
MEM_freeN(image_new);
}

if(tracked) {
if(tracked || !context->disable_failed) {
if(context->first_time) {
int prevframe;
@@ -288,8 +288,17 @@ static void marker_block_handler(bContext *C, void *arg_cb, int event)

ok= 1;
} else if(event==B_MARKER_OFFSET) {
cb->track->offset[0]= cb->track_offset[0]/width;
cb->track->offset[1]= cb->track_offset[1]/height;
float offset[2], delta[2];
int i;

offset[0]= cb->track_offset[0]/width;
offset[1]= cb->track_offset[1]/height;

sub_v2_v2v2(delta, offset, cb->track->offset);
copy_v2_v2(cb->track->offset, offset);

for(i=0; i<cb->track->markersnr; i++)
sub_v2_v2(cb->track->markers[i].pos, delta);

/* to update position of "parented" objects */
DAG_id_tag_update(&cb->clip->id, 0);
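This hunk is what keeps the anchor visually fixed while the offset sliders move: the change in offset is subtracted from every marker position, so marker position plus offset stays constant. The same idea as a small self-contained sketch (hypothetical helper, not Blender API):

#include <vector>

struct V2 { float x, y; };

static void slide_offset(V2& offset, std::vector<V2>& markers, V2 new_offset)
{
    const V2 delta = { new_offset.x - offset.x, new_offset.y - offset.y };
    offset = new_offset;            // the offset moves by +delta
    for (V2& p : markers) {         // every marker moves by -delta,
        p.x -= delta.x;             // so p + offset (the anchor) is unchanged
        p.y -= delta.y;
    }
}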
@@ -354,7 +354,7 @@ static void draw_track_path(SpaceClip *sc, MovieClip *UNUSED(clip), MovieTrackin
static void draw_marker_outline(SpaceClip *sc, MovieTrackingTrack *track, MovieTrackingMarker *marker, int width, int height)
{
int tiny= sc->flag&SC_SHOW_TINY_MARKER;
int show_pat= 0;
int show_search= 0;
float px[2];

UI_ThemeColor(TH_MARKER_OUTLINE);

@@ -401,8 +401,7 @@ static void draw_marker_outline(SpaceClip *sc, MovieTrackingTrack *track, MovieT

if(!tiny) glLineWidth(3.0f);

show_pat= ((marker->flag&MARKER_DISABLED)==0 || (sc->flag&SC_SHOW_MARKER_SEARCH)==0);
if(sc->flag&SC_SHOW_MARKER_PATTERN && show_pat) {
if(sc->flag&SC_SHOW_MARKER_PATTERN) {
glBegin(GL_LINE_LOOP);
glVertex2f(track->pat_min[0], track->pat_min[1]);
glVertex2f(track->pat_max[0], track->pat_min[1]);

@@ -411,7 +410,8 @@ static void draw_marker_outline(SpaceClip *sc, MovieTrackingTrack *track, MovieT
glEnd();
}

if(sc->flag&SC_SHOW_MARKER_SEARCH) {
show_search= TRACK_VIEW_SELECTED(sc, track) && ((marker->flag&MARKER_DISABLED)==0 || (sc->flag&SC_SHOW_MARKER_PATTERN)==0);
if(sc->flag&SC_SHOW_MARKER_SEARCH && show_search) {
glBegin(GL_LINE_LOOP);
glVertex2f(track->search_min[0], track->search_min[1]);
glVertex2f(track->search_max[0], track->search_min[1]);

@@ -442,7 +442,7 @@ static void track_colors(MovieTrackingTrack *track, int act, float col[3], float
static void draw_marker_areas(SpaceClip *sc, MovieTrackingTrack *track, MovieTrackingMarker *marker, int width, int height, int act, int sel)
{
int tiny= sc->flag&SC_SHOW_TINY_MARKER;
int show_pat= 0;
int show_search= 0;
float col[3], scol[3], px[2];

track_colors(track, act, col, scol);

@@ -515,8 +515,7 @@ static void draw_marker_areas(SpaceClip *sc, MovieTrackingTrack *track, MovieTra
glEnable(GL_LINE_STIPPLE);
}

show_pat= ((marker->flag&MARKER_DISABLED)==0 || (sc->flag&SC_SHOW_MARKER_SEARCH)==0);
if((track->pat_flag&SELECT)==sel && show_pat) {
if((track->pat_flag&SELECT)==sel && (sc->flag&SC_SHOW_MARKER_PATTERN)) {
if(track->flag&TRACK_LOCKED) {
if(act) UI_ThemeColor(TH_ACT_MARKER);
else if(track->pat_flag&SELECT) UI_ThemeColorShade(TH_LOCK_MARKER, 64);

@@ -531,18 +530,17 @@ static void draw_marker_areas(SpaceClip *sc, MovieTrackingTrack *track, MovieTra
else glColor3fv(col);
}

if(sc->flag&SC_SHOW_MARKER_PATTERN) {
glBegin(GL_LINE_LOOP);
glVertex2f(track->pat_min[0], track->pat_min[1]);
glVertex2f(track->pat_max[0], track->pat_min[1]);
glVertex2f(track->pat_max[0], track->pat_max[1]);
glVertex2f(track->pat_min[0], track->pat_max[1]);
glEnd();
}
glBegin(GL_LINE_LOOP);
glVertex2f(track->pat_min[0], track->pat_min[1]);
glVertex2f(track->pat_max[0], track->pat_min[1]);
glVertex2f(track->pat_max[0], track->pat_max[1]);
glVertex2f(track->pat_min[0], track->pat_max[1]);
glEnd();
}

/* search */
if((track->search_flag&SELECT)==sel) {
show_search= TRACK_VIEW_SELECTED(sc, track) && ((marker->flag&MARKER_DISABLED)==0 || (sc->flag&SC_SHOW_MARKER_PATTERN)==0);
if((track->search_flag&SELECT)==sel && (sc->flag&SC_SHOW_MARKER_SEARCH) && show_search) {
if(track->flag&TRACK_LOCKED) {
if(act) UI_ThemeColor(TH_ACT_MARKER);
else if(track->search_flag&SELECT) UI_ThemeColorShade(TH_LOCK_MARKER, 64);

@@ -557,14 +555,12 @@ static void draw_marker_areas(SpaceClip *sc, MovieTrackingTrack *track, MovieTra
else glColor3fv(col);
}

if(sc->flag&SC_SHOW_MARKER_SEARCH) {
glBegin(GL_LINE_LOOP);
glVertex2f(track->search_min[0], track->search_min[1]);
glVertex2f(track->search_max[0], track->search_min[1]);
glVertex2f(track->search_max[0], track->search_max[1]);
glVertex2f(track->search_min[0], track->search_max[1]);
glEnd();
}
glBegin(GL_LINE_LOOP);
glVertex2f(track->search_min[0], track->search_min[1]);
glVertex2f(track->search_max[0], track->search_min[1]);
glVertex2f(track->search_max[0], track->search_max[1]);
glVertex2f(track->search_min[0], track->search_max[1]);
glEnd();
}

if(tiny)
@@ -383,6 +383,12 @@ static int view_pan_modal(bContext *C, wmOperator *op, wmEvent *event)
RNA_float_set_array(op->ptr, "offset", offset);
view_pan_exec(C, op);
break;
case ESCKEY:
view_pan_exit(C, op, 1);
return OPERATOR_CANCELLED;
case SPACEKEY:
view_pan_exit(C, op, 0);
return OPERATOR_FINISHED;
default:
if(event->type==vpd->event_type && event->val==KM_RELEASE) {
view_pan_exit(C, op, 0);

@@ -397,6 +403,7 @@ static int view_pan_modal(bContext *C, wmOperator *op, wmEvent *event)
static int view_pan_cancel(bContext *C, wmOperator *op)
{
view_pan_exit(C, op, 1);

return OPERATOR_CANCELLED;
}
@@ -1256,7 +1256,7 @@ static void track_markers_initjob(bContext *C, TrackMarkersJob *tmj, int backwar
|
||||
|
||||
track_init_markers(sc, clip);
|
||||
|
||||
tmj->context= BKE_tracking_context_new(clip, &sc->user, backwards);
|
||||
tmj->context= BKE_tracking_context_new(clip, &sc->user, backwards, 1);
|
||||
|
||||
clip->tracking_context= tmj->context;
|
||||
|
||||
@@ -1361,7 +1361,8 @@ static int track_markers_exec(bContext *C, wmOperator *op)
|
||||
|
||||
track_init_markers(sc, clip);
|
||||
|
||||
context= BKE_tracking_context_new(clip, &sc->user, backwards);
|
||||
/* do not disable tracks due to threshold when tracking frame-by-frame */
|
||||
context= BKE_tracking_context_new(clip, &sc->user, backwards, sequence);
|
||||
|
||||
while(framenr != efra) {
|
||||
if(!BKE_tracking_next(context))
|