Commit 13bb6406 authored by MONSEIGNE Thibaut's avatar MONSEIGNE Thibaut

🔀 Merge branch '45-feature-add-asr-box' into 'development'

Resolve " Feature: Add ASR Box"

Closes #45

See merge request !87
parents 9b532ce5 5ed7dcf8
......@@ -19,6 +19,13 @@ if(UNIX)
SET_TARGET_PROPERTIES(${PROJECT_NAME} PROPERTIES COMPILE_FLAGS "-fPIC")
ENDIF(UNIX)
if(WIN32)
ADD_DEFINITIONS(/bigobj) # Allow big object files in debug mode with Visual Studio
ENDIF(WIN32)
ADD_DEFINITIONS(-D_USE_MATH_DEFINES) # Expose math constants such as M_PI
# OpenViBE Third Party
INCLUDE("FindThirdPartyEigen")
INCLUDE("FindThirdPartyBoost")
......
......@@ -18,7 +18,7 @@
namespace Geometry {
/// <summary> Enumeration of Standardization method for features matrix data.</summary>
/// <summary> Enumeration of Standardization method for features matrix data. </summary>
enum class EStandardization
{
None, ///< No change.
......@@ -33,7 +33,7 @@ enum class EStandardization
/// <summary> Apply an affine transformation and return the result (the last transpose is useless if the matrix is SPD).
/// \f[
/// B = R^{-1/2} * A * {R^{-1/2}}^{\mathsf{T}}
/// \f]\n
/// \f]
/// </summary>
/// <param name="ref"> The reference matrix which transforms. </param>
/// <param name="matrix"> the matrix to transform. </param>
......@@ -44,56 +44,62 @@ Eigen::MatrixXd AffineTransformation(const Eigen::MatrixXd& ref, const Eigen::Ma
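For readers who want to see the formula above in code, here is a minimal Eigen sketch of such an affine transformation, assuming the reference matrix is SPD so that its inverse square root can be taken through an eigendecomposition. The Sketch-suffixed name is hypothetical and is not part of the library.

#include <Eigen/Dense>

// Sketch of B = R^{-1/2} * A * (R^{-1/2})^T for an SPD reference R.
// The transpose could be dropped because R^{-1/2} is symmetric; it is kept to mirror the formula.
Eigen::MatrixXd AffineTransformationSketch(const Eigen::MatrixXd& ref, const Eigen::MatrixXd& a)
{
    const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(ref);
    const Eigen::MatrixXd iSqrt = solver.operatorInverseSqrt();    // R^{-1/2}
    return iSqrt * a * iSqrt.transpose();
}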
/// <summary> Standardize data row by row with selected method (destructive operation). </summary>
/// <param name="matrix"> The matrix to standardize. </param>
/// <param name="standard"> Standard method. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool MatrixStandardization(Eigen::MatrixXd& matrix, EStandardization standard = EStandardization::None);
/// <summary> Standardize data row by row with selected method (non destructive operation). </summary>
/// <param name="in"> The matrix to standardize. </param>
/// <param name="out"> The matrix standardized. </param>
/// <param name="standard"> Standard method. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool MatrixStandardization(const Eigen::MatrixXd& in, Eigen::MatrixXd& out, EStandardization standard = EStandardization::None);
/// <summary> Removes the mean of each row at the matrix (destructive operation). </summary>
/// <summary> Removes the mean of each row of the matrix (destructive operation).\n
/// So \f$\mu=0\f$. </summary>
/// <param name="matrix"> The Matrix to center. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool MatrixCenter(Eigen::MatrixXd& matrix);
/// <summary> Removes the mean of each row at the matrix (non destructive operation). </summary>
/// <summary> Removes the mean of each row of the matrix (non destructive operation).\n
/// So \f$\mu=0\f$. </summary>
/// <param name="in"> The Matrix to center. </param>
/// <param name="out"> The Matrix centered. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool MatrixCenter(const Eigen::MatrixXd& in, Eigen::MatrixXd& out);
/// <summary> Removes the mean of each row at the matrix and divide by the variance (destructive operation with scale return). </summary>
/// <summary> Removes the mean of each row of the matrix and divides it by its standard deviation (destructive operation with scale return).\n
/// So \f$\mu=0\f$ and \f$\sigma=1\f$. </summary>
/// <param name="matrix"> The Matrix to standardize. </param>
/// <param name="scale"> The scale vector. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).</remarks>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>). </remarks>
bool MatrixStandardScaler(Eigen::MatrixXd& matrix, Eigen::RowVectorXd& scale);
/// <summary> Removes the mean of each row at the matrix and divide by the variance (destructive operation). </summary>
/// <summary> Removes the mean of each row of the matrix and divides it by its standard deviation (destructive operation).\n
/// So \f$\mu=0\f$ and \f$\sigma=1\f$. </summary>
/// <param name="matrix"> The Matrix to standardize. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).</remarks>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>). </remarks>
bool MatrixStandardScaler(Eigen::MatrixXd& matrix);
/// <summary> Removes the mean of each row at the matrix and divide by the variance (non destructive operation). </summary>
/// <summary> Removes the mean of each row of the matrix and divides it by its standard deviation (non destructive operation with scale return).\n
/// So \f$\mu=0\f$ and \f$\sigma=1\f$. </summary>
/// <param name="in"> The Matrix to standardize. </param>
/// <param name="out"> The Matrix standardized. </param>
/// <param name="scale"> The scale vector. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).</remarks>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>). </remarks>
bool MatrixStandardScaler(const Eigen::MatrixXd& in, Eigen::MatrixXd& out, Eigen::RowVectorXd& scale);
/// <summary> Removes the mean of each row at the matrix and divide by the variance (non destructive operation). </summary>
/// <summary> Removes the mean of each row of the matrix and divides it by its standard deviation (non destructive operation).\n
/// So \f$\mu=0\f$ and \f$\sigma=1\f$. </summary>
/// <param name="in"> The Matrix to standardize. </param>
/// <param name="out"> The Matrix standardized. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).</remarks>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Adaptation of <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">StandardScaler</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>). </remarks>
bool MatrixStandardScaler(const Eigen::MatrixXd& in, Eigen::MatrixXd& out);
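As a rough picture of what the StandardScaler overloads above do row by row, here is a hedged Eigen sketch of the destructive variant with scale return. It assumes the scale is the per-row standard deviation, consistent with the stated mu = 0 and sigma = 1; the Sketch-suffixed name is hypothetical and not the library's actual body.

#include <Eigen/Dense>
#include <cmath>

// Sketch: center each row, then divide it by its standard deviation.
// Rows with zero variance are centered but left unscaled.
bool MatrixStandardScalerSketch(Eigen::MatrixXd& m, Eigen::RowVectorXd& scale)
{
    if (m.size() == 0) { return false; }
    const Eigen::VectorXd mean = m.rowwise().mean();
    m.colwise() -= mean;                                             // mu = 0 on each row
    scale.resize(m.rows());
    for (Eigen::Index i = 0; i < m.rows(); ++i) {
        scale(i) = std::sqrt(m.row(i).array().square().mean());      // standard deviation of row i
        if (scale(i) != 0.0) { m.row(i) /= scale(i); }               // sigma = 1 on each row
    }
    return true;
}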
/// <summary> Give the string format of Matrix for OpenViBE Log. </summary>
/// <summary> Give the string format of the Matrix. </summary>
/// <param name="matrix"> The Matrix to display. </param>
/// <returns> The string format. </returns>
std::string MatrixPrint(const Eigen::MatrixXd& matrix);
......@@ -102,7 +108,7 @@ std::string MatrixPrint(const Eigen::MatrixXd& matrix);
/// <param name="matrix1"> First Matrix. </param>
/// <param name="matrix2"> Second Matrix. </param>
/// <param name="precision"> Precision for matrix comparison. </param>
/// <returns> True if Equals, false if not. </returns>
/// <returns> <c>True</c> if the two elements are equal (with a precision tolerance), <c>False</c> otherwise. </returns>
bool AreEquals(const Eigen::MatrixXd& matrix1, const Eigen::MatrixXd& matrix2, double precision = 1e-6);
//*************************************************************
......@@ -114,7 +120,7 @@ bool AreEquals(const Eigen::MatrixXd& matrix1, const Eigen::MatrixXd& matrix2, d
/// <returns> Row with selected elements. </returns>
Eigen::RowVectorXd GetElements(const Eigen::RowVectorXd& row, const std::vector<size_t>& index);
/// <summary> Numpy arange in C++. </summary>
/// <summary> <a href="https://numpy.org/doc/stable/reference/generated/numpy.arange.html">Numpy arange</a> implementation in C++. </summary>
/// <typeparam name="T"> Generic numeric type parameter. </typeparam>
/// <param name="start"> The start. </param>
/// <param name="stop"> The stop. </param>
......@@ -132,7 +138,7 @@ std::vector<T> ARange(const T start, const T stop, const T step = 1)
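A short sketch of an arange-style helper may help picture the behaviour: like numpy.arange it includes start and excludes stop. The Sketch-suffixed name is hypothetical and not necessarily the library's ARange body; the usual rounding caveats apply to floating-point steps.

#include <vector>

// Sketch: values start, start + step, ... strictly below stop.
template <typename T>
std::vector<T> ARangeSketch(const T start, const T stop, const T step = 1)
{
    std::vector<T> result;
    for (T v = start; v < stop; v += step) { result.push_back(v); }
    return result;
}
// ARangeSketch(0, 10, 2) gives {0, 2, 4, 6, 8}.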
/// <summary> Turn vector of vector into vector. </summary>
/// <typeparam name="T"> Generic type parameter. </typeparam>
/// <param name="in"> vector of vector. </param>
/// <returns> vector&lt;T&gt; </returns>
/// <returns> <c>vector&lt;T&gt;</c> </returns>
template <typename T>
std::vector<T> Vector2DTo1D(const std::vector<std::vector<T>>& in)
{
......@@ -144,11 +150,11 @@ std::vector<T> Vector2DTo1D(const std::vector<std::vector<T>>& in)
return result;
}
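Since most of the Vector2DTo1D body is elided by the diff, here is a hedged sketch of the flattening it describes: the inner vectors are concatenated in order. The Sketch-suffixed name is hypothetical.

#include <vector>

// Sketch: flatten a vector of vectors by appending each inner vector in order.
template <typename T>
std::vector<T> Vector2DTo1DSketch(const std::vector<std::vector<T>>& in)
{
    std::vector<T> result;
    for (const auto& row : in) { result.insert(result.end(), row.begin(), row.end()); }
    return result;
}
// {{1, 2}, {3}, {4, 5}} becomes {1, 2, 3, 4, 5}.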
/// <summary> Turn vector into vector of vector with postiion repartition. </summary>
/// <summary> Turn a vector into a vector of vectors according to the given position repartition. </summary>
/// <typeparam name="T"> Generic type parameter. </typeparam>
/// <param name="in"> vector of vector. </param>
/// <param name="position"> position of element (size of position is the number of row the values are the number of element on each row). </param>
/// <returns> vector&lt;T&gt; </returns>
/// <returns> <c>vector&lt;T&gt;</c> </returns>
template <typename T>
std::vector<std::vector<T>> Vector1DTo2D(const std::vector<T>& in, const std::vector<size_t>& position)
{
......@@ -171,46 +177,46 @@ std::vector<std::vector<T>> Vector1DTo2D(const std::vector<T>& in, const std::ve
/// <param name="value"> The value. </param>
/// <param name="min"> The minimum. </param>
/// <param name="max"> The maximum. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
bool InRange(double value, double min, double max);
/// <returns> <c>True</c> if the value lies between min and max (inclusive), <c>False</c> otherwise. </returns>
bool InRange(const double value, const double min, const double max);
/// <summary> Validate if the vector is not empty and the matrices are valid. </summary>
/// <param name="matrices"> Vector of Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool AreNotEmpty(const std::vector<Eigen::MatrixXd>& matrices);
/// <summary> Validates that the matrix is not empty. </summary>
/// <param name="matrix"> Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool IsNotEmpty(const Eigen::MatrixXd& matrix);
/// <summary> Validates that two matrices have the same size. </summary>
/// <param name="a"> Matrix A. </param>
/// <param name="b"> Matrix B. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool HaveSameSize(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b);
/// <summary> Validate if the vector is not empty and the matrices have the same size. </summary>
/// <param name="matrices"> Vector of Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool HaveSameSize(const std::vector<Eigen::MatrixXd>& matrices);
/// <summary> Validates that the matrix is a square matrix and not empty. </summary>
/// <param name="matrix"> Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool IsSquare(const Eigen::MatrixXd& matrix);
/// <summary> Validate if the vector is not empty and the matrices are square and not empty. </summary>
/// <param name="matrices"> Vector of Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool AreSquare(const std::vector<Eigen::MatrixXd>& matrices);
//********************************************************
//******************** CSV MANAGEMENT ********************
//********************************************************
/// <summary>Return the string split by the \p sep parameter</summary>
/// <param name="s"> The string to split.</param>
/// <param name="sep"> the separator string which splits.</param>
/// <summary>Return the string split by the \p sep parameter. </summary>
/// <param name="s"> The string to split. </param>
/// <param name="sep"> the separator string which splits. </param>
/// <returns> Vector of string part. </returns>
std::vector<std::string> Split(const std::string& s, const std::string& sep);
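For illustration, a minimal sketch of the splitting behaviour documented above, using std::string::find; the Sketch-suffixed name is hypothetical and not necessarily the library's exact body.

#include <string>
#include <vector>

// Sketch: split s on every occurrence of sep; a trailing (possibly empty) part is kept.
std::vector<std::string> SplitSketch(const std::string& s, const std::string& sep)
{
    std::vector<std::string> parts;
    size_t start = 0, end = 0;
    while ((end = s.find(sep, start)) != std::string::npos) {
        parts.push_back(s.substr(start, end - start));
        start = end + sep.length();
    }
    parts.push_back(s.substr(start));
    return parts;
}
// SplitSketch("1;2;3", ";") gives {"1", "2", "3"}.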
......
......@@ -20,27 +20,27 @@
namespace Geometry {
/// <summary> Compute the weight of Linear Discriminant Analysis with Least squares (LSQR) Solver. </summary>
/// <param name="datasets"> The datasets one class by row and trials on colums. </param>
/// <param name="weight"> The Weight to apply. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Inspired by <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html">LinearDiscriminantAnalysis</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).</remarks>
bool LSQR(const std::vector<std::vector<Eigen::RowVectorXd>>& datasets, Eigen::MatrixXd& weight);
/// <param name="dataset"> The dataset (first dimension is the class, second dimension the trial as Feature Vector). </param>
/// <param name="weight"> The weight to apply. </param>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Inspired by <a href="http://scikit-learn.org">sklearn</a> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html">LinearDiscriminantAnalysis</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>). </remarks>
bool LSQR(const std::vector<std::vector<Eigen::RowVectorXd>>& dataset, Eigen::MatrixXd& weight);
/// <summary> Compute Least squares (LSQR) Weight and transform to FgDA Weight. \n
/// \f[ W_{\text{FgDA}} = W^{\mathsf{T}} \times (W \times W^{\mathsf{T}})^{-1} \times W \f]
/// </summary>
/// <param name="datasets"> The data set one class by row and trials on colums. </param>
/// <param name="dataset"> The dataset (first dimension is the class, second dimension is the trial as Feature Vector). </param>
/// <param name="weight"> The Weight to apply. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Method inspired by the work of Alexandre Barachant : <a href="https://github.com/alexandrebarachant/pyRiemann">pyRiemann</a> (<a href="https://github.com/alexandrebarachant/pyRiemann/blob/master/LICENSE">License</a>).</remarks>
bool FgDACompute(const std::vector<std::vector<Eigen::RowVectorXd>>& datasets, Eigen::MatrixXd& weight);
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Method inspired by the work of Alexandre Barachant : <a href="https://github.com/alexandrebarachant/pyRiemann">pyRiemann</a> (<a href="https://github.com/alexandrebarachant/pyRiemann/blob/master/LICENSE">License</a>). </remarks>
bool FgDACompute(const std::vector<std::vector<Eigen::RowVectorXd>>& dataset, Eigen::MatrixXd& weight);
/// <summary> Apply the weight to the vector (just a matrix product). </summary>
/// <param name="in"> Sample to transform. </param>
/// <param name="out"> Transformed Sample. </param>
/// <param name="weight"> The Weight to apply. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <remarks> Method inspired by the work of Alexandre Barachant : <a href="https://github.com/alexandrebarachant/pyRiemann">pyRiemann</a> (<a href="https://github.com/alexandrebarachant/pyRiemann/blob/master/LICENSE">License</a>).</remarks>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// <remarks> Method inspired by the work of Alexandre Barachant : <a href="https://github.com/alexandrebarachant/pyRiemann">pyRiemann</a> (<a href="https://github.com/alexandrebarachant/pyRiemann/blob/master/LICENSE">License</a>). </remarks>
bool FgDAApply(const Eigen::RowVectorXd& in, Eigen::RowVectorXd& out, const Eigen::MatrixXd& weight);
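To make the FgDA formula above concrete, here is a hedged Eigen sketch of building the filtering matrix from an LDA weight W and applying it to a feature vector, assuming W * W^T is invertible. The Sketch-suffixed names are hypothetical.

#include <Eigen/Dense>

// Sketch: W_FgDA = W^T * (W * W^T)^{-1} * W, then out = in * W_FgDA (a plain matrix product).
Eigen::MatrixXd FgDAWeightSketch(const Eigen::MatrixXd& w)
{
    return w.transpose() * (w * w.transpose()).inverse() * w;    // features x features filtering matrix
}

Eigen::RowVectorXd FgDAApplySketch(const Eigen::RowVectorXd& in, const Eigen::MatrixXd& weight)
{
    return in * weight;    // filter the feature vector
}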
} // namespace Geometry
......@@ -10,7 +10,7 @@
/// - List of Estimator inspired by the work of Alexandre Barachant : <a href="https://github.com/alexandrebarachant/pyRiemann">pyRiemann</a> (<a href="https://github.com/alexandrebarachant/pyRiemann/blob/master/LICENSE">License</a>).
/// - <a href="http://scikit-learn.org/stable/modules/generated/sklearn.covariance.LedoitWolf.html">Ledoit and Wolf Estimator</a> inspired by <a href="http://scikit-learn.org">sklearn</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).
/// - <a href="http://scikit-learn.org/stable/modules/generated/sklearn.covariance.OAS.html">Oracle Approximating Shrinkage (OAS) Estimator</a> Inspired by <a href="http://scikit-learn.org">sklearn</a> (<a href="https://github.com/scikit-learn/scikit-learn/blob/master/COPYING">License</a>).
/// - <b>Minimum Covariance Determinant (MCD) Estimator isn't implemented.</b>
/// - <b>Minimum Covariance Determinant (MCD) Estimator isn't implemented. </b>
///
///-------------------------------------------------------------------------------------------------
......@@ -36,9 +36,9 @@ enum class EEstimator
IDE ///< The Identity Matrix.
};
/// <summary> Convert estimators to string.</summary>
/// <param name="estimator"> The estimator.</param>
/// <returns> std::string </returns>
/// <summary> Convert estimators to string. </summary>
/// <param name="estimator"> The estimator. </param>
/// <returns> <c>std::string</c> </returns>
inline std::string toString(const EEstimator estimator)
{
switch (estimator)
......@@ -50,8 +50,8 @@ inline std::string toString(const EEstimator estimator)
case EEstimator::MCD: return "Minimum Covariance Determinant (MCD)";
case EEstimator::COR: return "Pearson Correlation";
case EEstimator::IDE: return "Identity";
default: return "Invalid";
}
return "Invalid";
}
/// <summary> Convert string to estimators. </summary>
......@@ -71,18 +71,18 @@ inline EEstimator StringToEstimator(const std::string& estimator)
//***********************************************************
//******************** COVARIANCES BASES ********************
//***********************************************************
/// <summary> Calculation of the Variance of a double data set \f$\vec{X}\f$.\n
/// \f[ V(X) = \left(\frac{1}{n} \sum_{i=1}^{N}x_{i}^{2}\right) - \mu^{2} \quad \text{with}~ \mu = \frac{1}{n} \sum_{i=1}^{N}x_{i} \f]
/// <summary> Calculation of the Variance of a dataset of doubles \f$\vec{X}\f$.\n
/// \f[ V(X) = \left(\frac{1}{N} \sum_{i=1}^{N}x_{i}^{2}\right) - \left(\frac{1}{N} \sum_{i=1}^{N}x_{i}\right)^{2} \f]
/// </summary>
/// <param name="x"> The data set \f$\vec{X}\f$. With \f$ N \f$ Samples. </param>
/// <param name="x"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Samples. </param>
/// <returns> The Variance. </returns>
double Variance(const Eigen::RowVectorXd& x);
/// <summary> Calculation of the Covariance between two double data set \f$\vec{X}\f$, \f$\vec{Y}\f$.\n
/// <summary> Calculation of the Covariance between two datasets of doubles \f$\vec{X}, \vec{Y}\f$.\n
/// \f[ \operatorname{Cov}\left(x,y\right) = \frac{\sum_{i=1}^{N}{x_{i}y_{i}} - \left(\sum_{i=1}^{N}{x_{i}}\sum_{i=1}^{N}{y_{i}}\right)/N}{N}\f]
/// </summary>
/// <param name="x"> The data set \f$\vec{X}\f$. With \f$ N \f$ Samples.</param>
/// <param name="y"> The data set \f$\vec{Y}\f$. With \f$ N \f$ Samples.</param>
/// <param name="x"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Samples. </param>
/// <param name="y"> The dataset \f$\vec{Y}\f$. With \f$ N \f$ Samples. </param>
/// <returns> The Covariance. </returns>
double Covariance(const Eigen::RowVectorXd& x, const Eigen::RowVectorXd& y);
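The two formulas above translate directly into Eigen reductions; a minimal sketch, with hypothetical Sketch-suffixed names:

#include <Eigen/Dense>

// Sketch of V(X) = (1/N) * sum(x_i^2) - ((1/N) * sum(x_i))^2.
double VarianceSketch(const Eigen::RowVectorXd& x)
{
    const double n = static_cast<double>(x.size());
    const double mean = x.sum() / n;
    return x.array().square().sum() / n - mean * mean;
}

// Sketch of Cov(x, y) = (sum(x_i * y_i) - sum(x_i) * sum(y_i) / N) / N.
double CovarianceSketch(const Eigen::RowVectorXd& x, const Eigen::RowVectorXd& y)
{
    const double n = static_cast<double>(x.size());
    return (x.dot(y) - x.sum() * y.sum() / n) / n;
}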
......@@ -91,7 +91,7 @@ double Covariance(const Eigen::RowVectorXd& x, const Eigen::RowVectorXd& y);
/// </summary>
/// <param name="cov"> The Covariance Matrix to shrink. </param>
/// <param name="shrinkage"> (Optional) The shrinkage coefficient : \f$ 0\leq \text{shrinkage} \leq 1\f$. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool ShrunkCovariance(Eigen::MatrixXd& cov, double shrinkage = 0.1);
/// <summary> Shrinks the Covariance Matrix \f$ M \f$ (non destructive operation).\n
......@@ -100,18 +100,18 @@ bool ShrunkCovariance(Eigen::MatrixXd& cov, double shrinkage = 0.1);
/// <param name="in"> The covariance matrix to shrink. </param>
/// <param name="out"> The shrunk covariance matrix. </param>
/// <param name="shrinkage"> (Optional) The shrinkage coefficient : \f$ 0\leq \text{shrinkage} \leq 1\f$. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool ShrunkCovariance(const Eigen::MatrixXd& in, Eigen::MatrixXd& out, double shrinkage = 0.1);
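The shrinkage formula itself is elided by the diff above, so the sketch below assumes the usual convex combination used by sklearn's shrunk_covariance: the matrix is pulled toward a scaled identity. The Sketch-suffixed name is hypothetical.

#include <Eigen/Dense>

// Sketch (assumed formula): M_shrunk = (1 - s) * M + s * (trace(M) / N) * I_N, with 0 <= s <= 1.
bool ShrunkCovarianceSketch(Eigen::MatrixXd& cov, const double shrinkage = 0.1)
{
    if (cov.rows() != cov.cols() || cov.size() == 0) { return false; }
    const Eigen::Index n = cov.rows();
    const double mu = cov.trace() / static_cast<double>(n);
    cov = (1.0 - shrinkage) * cov + shrinkage * mu * Eigen::MatrixXd::Identity(n, n);
    return true;
}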
/// <summary> Select the function to call for the covariance matrix.\n
/// - centralizing the data is useless for <see cref="EEstimator::COV"/> and <see cref="EEstimator::COR"/>.\n
/// - centralizing the data is not usual for <see cref="EEstimator::SCM"/>.
/// - centering the data is useless for <c><see cref="EEstimator::COV"/></c> and <c><see cref="EEstimator::COR"/></c>.\n
/// - centering the data is not usual for <c><see cref="EEstimator::SCM"/></c>.
/// </summary>
/// <param name="in"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="in"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="out"> The Covariance Matrix. </param>
/// <param name="estimator"> (Optional) The selected estimator (see <see cref="EEstimator"/>). </param>
/// <param name="standard"> (Optional) Standardize the data (see <see cref="EStandardization"/>). </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrix(const Eigen::MatrixXd& in, Eigen::MatrixXd& out, EEstimator estimator = EEstimator::COV,
EStandardization standard = EStandardization::Center);
......@@ -122,25 +122,25 @@ bool CovarianceMatrix(const Eigen::MatrixXd& in, Eigen::MatrixXd& out, EEstimato
/// \f[ M_{\operatorname{Cov}} =
/// \begin{pmatrix}
/// V\left(x_1\right) & \operatorname{Cov}\left(x_1,x_2\right) &\cdots & \operatorname{Cov}\left(x_1,x_N\right)\\
/// \operatorname{Cov}\left(x_2,x_1\right) &\ddots & \cdots & \vdots \\
/// \vdots & \vdots & \ddots & \vdots \\
/// \operatorname{Cov}\left(x_2,x_1\right) &\ddots & \ddots & \vdots \\
/// \vdots & \ddots & \ddots & \vdots \\
/// \operatorname{Cov}\left(x_N,x_1\right) &\cdots & \cdots & V\left(x_N\right)
/// \end{pmatrix}
/// \quad \text{with } x_i \text{ the row } i
/// \quad\quad \text{with } x_i \text{ the feature } i
/// \f]\n
/// With the <see cref="Variance"/> and <see cref="Covariance"/> function.
/// </summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixCOV(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
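A vectorised Eigen sketch of the COV estimator above: center each feature (row), then take X_c * X_c^T / S, matching the 1/N normalisation of the Variance formula. The Sketch-suffixed name is hypothetical.

#include <Eigen/Dense>

// Sketch: samples has N rows (features) and S columns (samples).
Eigen::MatrixXd CovarianceMatrixCOVSketch(const Eigen::MatrixXd& samples)
{
    const double s = static_cast<double>(samples.cols());
    const Eigen::VectorXd mean = samples.rowwise().mean();
    const Eigen::MatrixXd centered = samples.colwise() - mean;    // remove each feature's mean
    return (centered * centered.transpose()) / s;                 // N x N covariance matrix
}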
/// <summary> Calculation of the covariance matrix by the method : Normalized Spatial Covariance Matrix (SCM).\n
/// \f[ M_{\operatorname{Cov_{SCM}}} = \frac{XX^{\mathsf{T}}}{\operatorname{trace}{\left(XX^{\mathsf{T}}\right)}} \f]
/// </summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixSCM(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
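The SCM estimator above is essentially a one-liner in Eigen; a hypothetical Sketch-suffixed illustration:

#include <Eigen/Dense>

// Sketch: X * X^T normalised by its trace.
Eigen::MatrixXd CovarianceMatrixSCMSketch(const Eigen::MatrixXd& samples)
{
    const Eigen::MatrixXd c = samples * samples.transpose();
    return c / c.trace();
}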
/// <summary> Calculation of the covariance matrix and shrinkage by the method : Ledoit and Wolf.\n
......@@ -164,9 +164,9 @@ bool CovarianceMatrixSCM(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// \f[ \text{Shrinkage}_\text{LWF} = \frac{\beta}{\delta} \quad \text{with } \delta = \frac{\Sigma\left( M_{\delta}^2 \right)}{N} \quad\text{and}\quad
/// \beta = \operatorname{min}\left(\frac{\Sigma\left( M_{\beta}^2 \right)}{N \times S},~ \delta\right)\f]
/// </summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixLWF(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// <summary> Calculation of the covariance matrix and shrinkage by the method : Oracle Approximating Shrinkage (OAS).\n
......@@ -192,15 +192,15 @@ bool CovarianceMatrixLWF(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// \end{cases}
/// \f]
/// </summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixOAS(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// <summary>Calculation of the covariance matrix and shrinkage by the method : Minimum Covariance Determinant (MCD).</summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <summary>Calculation of the covariance matrix and shrinkage by the method : Minimum Covariance Determinant (MCD). </summary>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
/// \todo Not implemented.
bool CovarianceMatrixMCD(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
......@@ -210,15 +210,15 @@ bool CovarianceMatrixMCD(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// = \frac{ M_{\operatorname{Cov}}\left(i,j\right) } { \sqrt{ M_{\operatorname{Cov}}\left(i,i\right) * M_{\operatorname{Cov}}\left(j,j\right) } }
/// \f]
/// </summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixCOR(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
/// <summary> Return the Identity matrix \f$ I_N \f$.</summary>
/// <param name="samples"> The data set \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <summary> Return the Identity matrix \f$ I_N \f$. </summary>
/// <param name="samples"> The dataset \f$\vec{X}\f$. With \f$ N \f$ Rows (features) and \f$ S \f$ columns (samples). </param>
/// <param name="cov"> The Covariance Matrix. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool CovarianceMatrixIDE(const Eigen::MatrixXd& samples, Eigen::MatrixXd& cov);
} // namespace Geometry
......@@ -15,23 +15,23 @@
namespace Geometry {
/// <summary> Compute the features vector of covariance in with the selected method. </summary>
/// <summary> Compute the feature vector of a covariance matrix with the selected method. </summary>
/// <param name="in"> The input covariance matrix. </param>
/// <param name="out"> The Feature Vector. </param>
/// <param name="tangent"> (Optional) True to use tangent space featurization, Upper Triangle Squeeze if false. </param>
/// <param name="ref"> The reference Matrix (usefull for Tangent Space Featurization). </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <param name="out"> The Feature Vector. </param>
/// <param name="tangent"> (Optional) True to use tangent space featurization, Upper Triangle Squeeze if false. </param>
/// <param name="ref"> The reference Matrix (usefull for Tangent Space Featurization). </param>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool Featurization(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out, bool tangent = true, const Eigen::MatrixXd& ref = Eigen::MatrixXd());
/// <summary> Compute the covariance out of features vector with the selected method. </summary>
/// <param name="in"> The Feature Vector. </param>
/// <summary> Compute the covariance matrix from a feature vector with the selected method. </summary>
/// <param name="in"> The Feature Vector. </param>
/// <param name="out"> The output covariance matrix. </param>
/// <param name="tangent"> (Optional) True to use tangent space featurization, Upper Triangle Squeeze if false. </param>
/// <param name="ref"> The reference Matrix (usefull for Tangent Space Featurization). </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <param name="tangent"> (Optional) True to use tangent space featurization, Upper Triangle Squeeze if false. </param>
/// <param name="ref"> The reference Matrix (usefull for Tangent Space Featurization). </param>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool UnFeaturization(const Eigen::RowVectorXd& in, Eigen::MatrixXd& out, bool tangent = true, const Eigen::MatrixXd& ref = Eigen::MatrixXd());
/// <summary> Squeeze the upper triangle of NxN square in to a N(N+1)/2 Vector.
/// <summary> Squeeze the upper triangle of \f$N \times N\f$ square matrix to a \f$\frac{N\left(N+1\right)}{2}\f$ Vector.
/// <table align="center" border="0">
/// <tr><th>Upper Triangle Matrix</th><th></th><th>Row Major Upper Triangle Squeeze</th> <th></th> <th>Diagonal Major Upper Triangle Squeeze</th></tr>
/// <tr><td>\f[ \begin{pmatrix} a&b&c\\d&e&f\\g&h&i \end{pmatrix} \Rightarrow \begin{pmatrix} a&b&c\\0&e&f\\0&0&i \end{pmatrix} \f]</td>
......@@ -41,13 +41,13 @@ bool UnFeaturization(const Eigen::RowVectorXd& in, Eigen::MatrixXd& out, bool ta
/// <td>\f[\begin{pmatrix} a&b&c\\d&e&f\\g&h&i \end{pmatrix} \Rightarrow \begin{pmatrix} a&e&i&b&f&c \end{pmatrix} \f]</td></tr>
/// </table>
/// </summary>
/// <param name="in"> The NXN in. </param>
/// <param name="out"> The N(N+1)/2 vector. </param>
/// <param name="in"> The \f$N \times N\f$ square matrix. </param>
/// <param name="out"> The \f$\frac{N\left(N+1\right)}{2}\f$ Vector. </param>
/// <param name="rowMajor"> Get the values row by row if true, diagonal by diagonal if false. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool SqueezeUpperTriangle(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out, bool rowMajor = true);
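A hedged sketch of the row-major squeeze described above (the diagonal-major variant only changes the traversal order); the Sketch-suffixed name is hypothetical.

#include <Eigen/Dense>

// Sketch: copy the upper triangle of an N x N matrix, row by row, into a 1 x N(N+1)/2 vector.
bool SqueezeUpperTriangleSketch(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out)
{
    if (in.rows() != in.cols() || in.size() == 0) { return false; }
    const Eigen::Index n = in.rows();
    out.resize(n * (n + 1) / 2);
    Eigen::Index k = 0;
    for (Eigen::Index i = 0; i < n; ++i) {
        for (Eigen::Index j = i; j < n; ++j) { out(k++) = in(i, j); }    // a, b, c, e, f, i for the 3 x 3 example
    }
    return true;
}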
/// <summary> Compute the upper triangle of N(N+1)/2 Vector to a NxN square out.
/// <summary> Expand a \f$\frac{N\left(N+1\right)}{2}\f$ Vector back into the upper triangle of a \f$N \times N\f$ square matrix.
/// <table align="center" border="0">
/// <tr><th>Row Major Method</th> <th></th> <th>Diagonal Major Method</th></tr>
/// <tr><td>\f[ \begin{pmatrix} a&b&c&d&e&f \end{pmatrix} \Rightarrow \begin{pmatrix} a&b&c\\0&d&e\\0&0&f \end{pmatrix} \f]</td>
......@@ -55,34 +55,43 @@ bool SqueezeUpperTriangle(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out, bo
/// <td>\f[ \begin{pmatrix} a&b&c&d&e&f \end{pmatrix} \Rightarrow \begin{pmatrix} a&d&f\\0&b&e\\0&0&c \end{pmatrix} \f]</td></tr>
/// </table>
/// </summary>
/// <param name="in"> The N(N+1)/2 vector. </param>
/// <param name="out"> The NXN out. </param>
/// <param name="in"> The \f$\frac{N\left(N+1\right)}{2}\f$ Vector. </param>
/// <param name="out"> The \f$N \times N\f$ square matrix. </param>
/// <param name="rowMajor"> Get the values row by row if true, diagonal by diagonal if false. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool UnSqueezeUpperTriangle(const Eigen::RowVectorXd& in, Eigen::MatrixXd& out, bool rowMajor = true);
/// <summary> Project a covariance matrices in the tangent space according to the given reference point. \n
/// <summary> Project a covariance matrix (\f$M\f$) in the tangent space (\f$\mathcal{T}\f$) according to the given reference point (\f$M_\text{Ref}\f$). <br/>
///
/// - Compute the transformation matrix for the covariance matrix \f$M\f$ with the reference matrix \f$M_\text{Ref}\f$ and squeeze this matrix (see <see cref="SqueezeUpperTriangle"/>).
/// \f[
/// \begin{aligned}
/// J &= \log{\left(M_\text{Ref}^{-1/2} \times M \times M_\text{Ref}^{-1/2}\right)}\\
/// V_J &= \operatorname{SqueezeUpperTriangle}(J)
/// \end{aligned}
/// \f]
/// - Compute a coefficient Vector to apply to the transformation vector.
/// \f[
/// \begin{aligned}
/// J &= \log{\left(M_\text{Ref}^{-1/2} ~ M ~ M_\text{Ref}^{-1/2}\right)} \\
/// M_\text{Coeffs} &= \begin{pmatrix}
/// 1 & \sqrt{2} & \cdots & \sqrt{2} \\
/// 0 & 1 & \ddots & \sqrt{2} \\
/// \vdots & \ddots & \ddots & \vdots\\
/// 0 & \cdots & \cdots & 1
/// \end{pmatrix}
/// \end{aligned} \\
/// \text{With : } V_J = \operatorname{SqueezeUpperTriangle}(J) \quad \text{ and } \quad V_\text{Coeffs} = \operatorname{SqueezeUpperTriangle}(M_\text{Coeffs})\\
/// \Rightarrow V_\text{Ts} = V_J \odot V_\text{Coeffs}
/// \end{pmatrix}\\
/// V_\text{Coeffs} &= \operatorname{SqueezeUpperTriangle}(M_\text{Coeffs})\\
/// \end{aligned}
/// \f]
/// - Compute the element-wise product of the two vectors to obtain the tangent space Projection \f$\zeta_M\f$
/// \f[ \zeta_M = V_J \odot V_\text{Coeffs} \f]
/// </summary>
/// <param name="in"> The NXN covariance in. </param>
/// <param name="out"> The N(N+1)/2 row. </param>
/// <param name="ref"> (Optional) The NXN reference in (use the identity Matrix if empty). </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <param name="in"> The \f$N \times N\f$ covariance matrix. </param>
/// <param name="out"> The \f$\frac{N\left(N+1\right)}{2}\f$ row. </param>
/// <param name="ref"> (Optional) The \f$N \times N\f$ reference in (use the identity Matrix if empty). </param>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool TangentSpace(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out, const Eigen::MatrixXd& ref = Eigen::MatrixXd());
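Putting the steps from the summary above together, here is a hedged Eigen sketch for SPD inputs: whiten by the reference, take the matrix logarithm through an eigendecomposition, then squeeze the upper triangle row by row with sqrt(2) weights on the off-diagonal terms. The Sketch-suffixed name is hypothetical and not the library's actual body.

#include <Eigen/Dense>
#include <cmath>

// Sketch of the tangent space projection zeta_M for SPD matrices.
Eigen::RowVectorXd TangentSpaceSketch(const Eigen::MatrixXd& m, const Eigen::MatrixXd& ref)
{
    const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> refSolver(ref);
    const Eigen::MatrixXd iSqrt = refSolver.operatorInverseSqrt();    // M_Ref^{-1/2}
    const Eigen::MatrixXd whitened = iSqrt * m * iSqrt;
    const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(whitened);
    const Eigen::VectorXd logEv = solver.eigenvalues().array().log().matrix();
    const Eigen::MatrixXd j = solver.eigenvectors() * logEv.asDiagonal() * solver.eigenvectors().transpose();    // J = log(whitened)
    const Eigen::Index n = j.rows();
    Eigen::RowVectorXd ts(n * (n + 1) / 2);
    Eigen::Index k = 0;
    for (Eigen::Index i = 0; i < n; ++i) {
        for (Eigen::Index c = i; c < n; ++c) { ts(k++) = (i == c ? 1.0 : std::sqrt(2.0)) * j(i, c); }    // V_J element-wise V_Coeffs
    }
    return ts;
}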
/// <summary> Project a Tangent space vectors in the manifold according to the given reference point. \n
/// <summary> Project a tangent space vector back to the manifold according to the given reference point. <br/>
/// \f[
/// \begin{aligned}
/// \text{With : } M_\text{Ts} &= \operatorname{UnSqueezeUpperTriangle}(V_\text{Ts}) \quad \text{ and } \quad \mathsf{U}_{M}\text{ the upper triangular matrix.}\\
......@@ -91,10 +100,10 @@ bool TangentSpace(const Eigen::MatrixXd& in, Eigen::RowVectorXd& out, const Eige
/// \end{aligned}
/// \f]
/// </summary>
/// <param name="in"> The N(N+1)/2 row. </param>
/// <param name="out"> The NXN covariance out. </param>
/// <param name="ref"> (Optional) The NXN reference out (use the identity Matrix if empty). </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <param name="in"> The \f$\frac{N\left(N+1\right)}{2}\f$ row. </param>
/// <param name="out"> The \f$N \times N\f$ covariance matrix. </param>
/// <param name="ref"> (Optional) The \f$N \times N\f$ reference out (use the identity Matrix if empty). </param>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool UnTangentSpace(const Eigen::RowVectorXd& in, Eigen::MatrixXd& out, const Eigen::MatrixXd& ref = Eigen::MatrixXd());
} // namespace Geometry
......@@ -19,14 +19,14 @@
namespace Geometry {
/// <summary> Compute the matrix at the position alpha on the geodesic between A and B with the selected \p metric.\n
/// - Allowed Metrics : <see cref="EMetric::Riemann"/>, <see cref="EMetric::Euclidian"/>, <see cref="EMetric::LogEuclidian"/>, <see cref="EMetric::Identity"/>
/// - Allowed Metrics : <c>Riemann</c>, <c>Euclidian</c>, <c>LogEuclidian</c>, <c>Identity</c>
/// </summary>
/// <param name="a"> The First Covariance matrix. </param>
/// <param name="b"> The Second Covariance matrix. </param>
/// <param name="g"> The Geodesic. </param>
/// <param name="metric"> (Optional) The metric (see <see cref="EMetric"/>). </param>
/// <param name="alpha"> (Optional) Position on the Geodesic : \f$ 0\leq \text{alpha} \leq 1\f$. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>
bool Geodesic(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b, Eigen::MatrixXd& g, EMetric metric = EMetric::Riemann, double alpha = 0.5);
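The Riemannian case documented just below follows the usual affine-invariant geodesic; since the exact formula is elided by the diff, the sketch assumes the standard pyRiemann-style expression G(alpha) = A^{1/2} (A^{-1/2} B A^{-1/2})^alpha A^{1/2} for SPD A and B. The Sketch-suffixed name is hypothetical.

#include <Eigen/Dense>

// Sketch (assumed formula): geodesic between SPD matrices A and B at position alpha in [0, 1].
Eigen::MatrixXd GeodesicRiemannSketch(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b, const double alpha = 0.5)
{
    const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> sa(a);
    const Eigen::MatrixXd sqrtA  = sa.operatorSqrt();           // A^{1/2}
    const Eigen::MatrixXd iSqrtA = sa.operatorInverseSqrt();    // A^{-1/2}
    const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> si(iSqrtA * b * iSqrtA);
    const Eigen::VectorXd powEv = si.eigenvalues().array().pow(alpha).matrix();    // eigenvalues raised to alpha
    const Eigen::MatrixXd innerPow = si.eigenvectors() * powEv.asDiagonal() * si.eigenvectors().transpose();
    return sqrtA * innerPow * sqrtA;    // alpha = 0 gives A, alpha = 1 gives B
}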
/// <summary> Compute the matrix at the position alpha on the Riemannian geodesic between A and B. \n
......@@ -36,25 +36,37 @@ bool Geodesic(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b, Eigen::MatrixX
/// <param name="b"> The Second Covariance matrix. </param>
/// <param name="g"> The Geodesic. </param>
/// <param name="alpha"> (Optional) Position on the Geodesic : \f$ 0\leq \text{alpha} \leq 1\f$. </param>
/// <returns> <c>True</c> if it succeeds, <c>false</c> otherwise. </returns>
/// <returns> <c>True</c> if it succeeds, <c>False</c> otherwise. </returns>