Commit 8d1b305c authored by MONSEIGNE Thibaut

:twisted_rightwards_arrows: Merge branch '160-update-classification-artifact-clang-disgnostique' into 'development'

Resolve "Update: Classification/Artifact Clang Diagnostique"

See merge request !181
parents 427465fb d91d7566
Related merge requests: !235 Release 3.5.0, !224 Release 3.4.0, !181 Resolve "Update: Classification/Artifact Clang Diagnostique"
......@@ -45,11 +45,3 @@ install(TARGETS ${PROJECT_NAME}
set(SUB_DIR_NAME artifact)
install(DIRECTORY box-tutorials/ DESTINATION ${DIST_DATADIR}/openvibe/scenarios/box-tutorials/${SUB_DIR_NAME})
#install(DIRECTORY bci-examples/ DESTINATION ${DIST_DATADIR}/openvibe/scenarios/bci-examples/${SUB_DIR_NAME})
# ---------------------------------
# Test applications
# ---------------------------------
if (OV_COMPILE_TESTS)
#ADD_SUBDIRECTORY(test)
endif ()
......@@ -9,14 +9,14 @@ namespace Artifact {
bool CBoxAlgorithmASRProcessor::initialize()
{
//***** Codecs *****
m_SignalDecoder.initialize(*this, 0);
m_signalDecoder.initialize(*this, 0);
m_stimulationEncoder.initialize(*this, 0);
m_signalEncoder.initialize(*this, 1);
m_signalEncoder.getInputSamplingRate().setReferenceTarget(m_SignalDecoder.getOutputSamplingRate()); // Link Sampling
m_signalEncoder.getInputMatrix().setReferenceTarget(m_SignalDecoder.getOutputMatrix()); // Link Matrix
m_signalEncoder.getInputSamplingRate().setReferenceTarget(m_signalDecoder.getOutputSamplingRate()); // Link Sampling
m_signalEncoder.getInputMatrix().setReferenceTarget(m_signalDecoder.getOutputMatrix()); // Link Matrix
//***** Pointers *****
m_iMatrix = m_SignalDecoder.getOutputMatrix();
m_iMatrix = m_signalDecoder.getOutputMatrix();
m_oStimulation = m_stimulationEncoder.getInputStimulationSet();
m_oMatrix = m_signalEncoder.getInputMatrix();
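The setReferenceTarget calls above are what let this box forward chunks without copying: the encoder's input parameters are bound once, at initialize() time, to the decoder's output parameters, so whatever the decoder writes is exactly what the encoder reads. A self-contained sketch of the idea (the RefTarget class below is a hypothetical stand-in for illustration, not the OpenViBE toolkit type):

#include <cassert>
#include <vector>

// Hypothetical stand-in for a codec parameter that can forward to another parameter.
template <typename T>
class RefTarget
{
public:
    void setReferenceTarget(const RefTarget<T>& other) { m_target = &other; }  // link once
    void set(const T& v) { m_value = v; }
    const T& get() const { return m_target ? m_target->get() : m_value; }      // follow the link
private:
    const RefTarget<T>* m_target = nullptr;
    T m_value{};
};

int main()
{
    RefTarget<std::vector<double>> decoderOutput, encoderInput;
    encoderInput.setReferenceTarget(decoderOutput);  // as in initialize() above
    decoderOutput.set({ 1.0, 2.0, 3.0 });            // the decoder fills its output...
    assert(encoderInput.get().size() == 3);          // ...and the encoder sees it, no copy made
    return 0;
}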
......@@ -33,7 +33,7 @@ bool CBoxAlgorithmASRProcessor::initialize()
//---------------------------------------------------------------------------------------------------
bool CBoxAlgorithmASRProcessor::uninitialize()
{
m_SignalDecoder.uninitialize();
m_signalDecoder.uninitialize();
m_stimulationEncoder.uninitialize();
m_signalEncoder.uninitialize();
......@@ -54,18 +54,18 @@ bool CBoxAlgorithmASRProcessor::process()
{
Kernel::IBoxIO& boxCtx = this->getDynamicBoxContext();
for (size_t i = 0; i < boxCtx.getInputChunkCount(0); ++i) {
m_SignalDecoder.decode(i); // Decode the chunk
m_signalDecoder.decode(i); // Decode the chunk
OV_ERROR_UNLESS_KRF(m_iMatrix->getDimensionCount() == 2, "Invalid Input Signal", Kernel::ErrorType::BadInput);
const uint64_t start = boxCtx.getInputChunkStartTime(0, i), // Time Code Chunk Start
end = boxCtx.getInputChunkEndTime(0, i); // Time Code Chunk End
if (m_SignalDecoder.isHeaderReceived()) // Header received
if (m_signalDecoder.isHeaderReceived()) // Header received
{
m_signalEncoder.encodeHeader();
m_stimulationEncoder.encodeHeader();
boxCtx.markOutputAsReadyToSend(0, start, end);
}
if (m_SignalDecoder.isBufferReceived()) // Buffer received
if (m_signalDecoder.isBufferReceived()) // Buffer received
{
const bool prevTrivial = m_asr.getTrivial();
Eigen::MatrixXd in, out;
......@@ -82,7 +82,7 @@ bool CBoxAlgorithmASRProcessor::process()
boxCtx.markOutputAsReadyToSend(0, start, end);
}
}
if (m_SignalDecoder.isEndReceived()) // End received
if (m_signalDecoder.isEndReceived()) // End received
{
m_signalEncoder.encodeEnd();
m_stimulationEncoder.encodeEnd();
......
......@@ -35,7 +35,7 @@ public:
protected:
//***** Codecs *****
Toolkit::TSignalDecoder<CBoxAlgorithmASRProcessor> m_SignalDecoder; ///< Input Signal Decoder
Toolkit::TSignalDecoder<CBoxAlgorithmASRProcessor> m_signalDecoder; ///< Input Signal Decoder
Toolkit::TStimulationEncoder<CBoxAlgorithmASRProcessor> m_stimulationEncoder; ///< Output Stimulation Encoder
Toolkit::TSignalEncoder<CBoxAlgorithmASRProcessor> m_signalEncoder; ///< Output Signal Encoder
......
......@@ -84,24 +84,22 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
m_labels.clear();
this->initializeExtraParameterMechanism();
size_t hiddenNeuronCount = size_t(this->getInt64Parameter(OVP_Algorithm_ClassifierMLP_InputParameterId_HiddenNeuronCount));
double alpha = this->getDoubleParameter(OVP_Algorithm_ClassifierMLP_InputParameterId_Alpha);
double epsilon = this->getDoubleParameter(OVP_Algorithm_ClassifierMLP_InputParameterId_Epsilon);
Eigen::Index hiddenNeuronCount = Eigen::Index(this->getInt64Parameter(OVP_Algorithm_ClassifierMLP_InputParameterId_HiddenNeuronCount));
double alpha = this->getDoubleParameter(OVP_Algorithm_ClassifierMLP_InputParameterId_Alpha);
double epsilon = this->getDoubleParameter(OVP_Algorithm_ClassifierMLP_InputParameterId_Epsilon);
this->uninitializeExtraParameterMechanism();
if (hiddenNeuronCount < 1)
{
if (hiddenNeuronCount < 1) {
this->getLogManager() << Kernel::LogLevel_Error << "Invalid amount of neuron in the hidden layer. Fallback to default value (3)\n";
hiddenNeuronCount = 3;
}
if (alpha <= 0)
{
if (alpha <= 0) {
this->getLogManager() << Kernel::LogLevel_Error << "Invalid value for learning coefficient (" << alpha << "). Fallback to default value (0.01)\n";
alpha = 0.01;
}
if (epsilon <= 0)
{
this->getLogManager() << Kernel::LogLevel_Error << "Invalid value for stop learning condition (" << epsilon << "). Fallback to default value (0.000001)\n";
if (epsilon <= 0) {
this->getLogManager() << Kernel::LogLevel_Error << "Invalid value for stop learning condition (" << epsilon <<
"). Fallback to default value (0.000001)\n";
epsilon = 0.000001;
}
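The recurring fix across this merge request is swapping size_t for Eigen::Index wherever a value indexes an Eigen object: Eigen's rows(), cols() and size() return the signed Eigen::Index (std::ptrdiff_t by default), so unsigned counters set off Clang sign-conversion diagnostics. A minimal warning-free loop for reference:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    const Eigen::MatrixXd m = Eigen::MatrixXd::Identity(3, 4);
    double sum = 0.0;
    // rows() and cols() return Eigen::Index (signed), so the counters match that type.
    for (Eigen::Index i = 0; i < m.rows(); ++i) {
        for (Eigen::Index j = 0; j < m.cols(); ++j) { sum += m(i, j); }
    }
    std::cout << sum << "\n";  // prints 3 (the number of ones on the diagonal)
    return 0;
}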
......@@ -112,25 +110,23 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
size_t validationElementCount = 0;
//We generate the list of classes
for (auto iter = classCount.begin(); iter != classCount.end(); ++iter)
{
for (auto iter = classCount.begin(); iter != classCount.end(); ++iter) {
//We keep 20% of the training set for validation, for each class
validationElementCount += size_t(iter->second * 0.2);
validationElementCount += size_t(double(iter->second) * 0.2);
m_labels.push_back(iter->first);
iter->second = size_t(iter->second * 0.2);
iter->second = size_t(double(iter->second) * 0.2);
}
const size_t nbClass = m_labels.size();
const size_t nFeature = dataset.getFeatureVector(0).getSize();
const Eigen::Index nbClass = Eigen::Index(m_labels.size());
const Eigen::Index nFeature = Eigen::Index(dataset.getFeatureVector(0).getSize());
//Generate the target vector for each class. To save time and memory, we compute only one vector per class
//The target vector looks like the following: [0 0 1 0] for class 3 (if 4 classes)
for (size_t i = 0; i < nbClass; ++i)
{
for (Eigen::Index i = 0; i < nbClass; ++i) {
Eigen::VectorXd oTarget = Eigen::VectorXd::Zero(nbClass);
//class 1 is at index 0
oTarget[size_t(m_labels[i])] = 1.;
targetList[m_labels[i]] = oTarget;
oTarget[Eigen::Index(m_labels[i])] = 1.;
targetList[m_labels[i]] = oTarget;
}
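As the comments above describe, each class gets a one-hot target: a vector with 1 at the class index and 0 everywhere else. A standalone sketch, assuming labels are the indices 0..nbClass-1 as the indexing above implies:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    const Eigen::Index nbClass = 4;
    const Eigen::Index label = 2;  // "class 3" lives at index 2
    Eigen::VectorXd target = Eigen::VectorXd::Zero(nbClass);
    target[label] = 1.0;           // one-hot: [0 0 1 0]
    std::cout << target.transpose() << "\n";
    return 0;
}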
//We store each normalized vector we get for training. This is not optimal in memory but avoids a lot of computation later
......@@ -143,18 +139,15 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
//We don't need to shuffle; it has already been done by the trainer box
//We store 20% of the feature vectors for validation
int validationIndex = 0, trainingIndex = 0;
for (size_t i = 0; i < dataset.getFeatureVectorCount(); ++i)
{
const Eigen::Map<Eigen::VectorXd> oFeatureVec(const_cast<double*>(dataset.getFeatureVector(i).getBuffer()), nFeature);
for (size_t i = 0; i < dataset.getFeatureVectorCount(); ++i) {
const Eigen::Map<Eigen::VectorXd> oFeatureVec(const_cast<double*>(dataset.getFeatureVector(i).getBuffer()), Eigen::Index(nFeature));
Eigen::VectorXd oData = oFeatureVec;
if (classCount[dataset.getFeatureVector(i).getLabel()] > 0)
{
if (classCount[dataset.getFeatureVector(i).getLabel()] > 0) {
oValidationDataMatrix.col(validationIndex++) = oData;
oValidationSet.push_back(dataset.getFeatureVector(i).getLabel());
--classCount[dataset.getFeatureVector(i).getLabel()];
}
else
{
else {
oTrainingDataMatrix.col(trainingIndex++) = oData;
oTrainingSet.push_back(dataset.getFeatureVector(i).getLabel());
}
......@@ -164,23 +157,19 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
m_max = oTrainingDataMatrix.maxCoeff();
m_min = oTrainingDataMatrix.minCoeff();
//Normalization of the data. We need to do it to avoid saturation of tanh.
for (size_t i = 0; i < size_t(oTrainingDataMatrix.cols()); ++i)
{
for (size_t j = 0; j < size_t(oTrainingDataMatrix.rows()); ++j)
{
for (Eigen::Index i = 0; i < oTrainingDataMatrix.cols(); ++i) {
for (Eigen::Index j = 0; j < oTrainingDataMatrix.rows(); ++j) {
oTrainingDataMatrix(j, i) = 2 * (oTrainingDataMatrix(j, i) - m_min) / (m_max - m_min) - 1;
}
}
for (size_t i = 0; i < size_t(oValidationDataMatrix.cols()); ++i)
{
for (size_t j = 0; j < size_t(oValidationDataMatrix.rows()); ++j)
{
for (Eigen::Index i = 0; i < oValidationDataMatrix.cols(); ++i) {
for (Eigen::Index j = 0; j < oValidationDataMatrix.rows(); ++j) {
oValidationDataMatrix(j, i) = 2 * (oValidationDataMatrix(j, i) - m_min) / (m_max - m_min) - 1;
}
}
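Both loop nests above apply the same affine map, x' = 2 * (x - m_min) / (m_max - m_min) - 1, which sends [m_min, m_max] onto [-1, 1] and keeps tanh out of its flat saturated region; note that the validation matrix is deliberately scaled with the training min/max so both sets share one mapping. The same transform in vectorized form, as a standalone sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXd data(2, 3);
    data << 0.0, 5.0, 10.0,
            2.0, 4.0,  8.0;
    const double min = data.minCoeff(), max = data.maxCoeff();
    // x' = 2 * (x - min) / (max - min) - 1 maps [min, max] onto [-1, 1].
    const Eigen::ArrayXXd scaled = 2.0 * (data.array() - min) / (max - min) - 1.0;
    std::cout << scaled << "\n";  // every coefficient now lies in [-1, 1]
    return 0;
}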
const double featureCount = double(oTrainingSet.size());
const double boundValue = 1. / (nFeature + 1);
const double boundValue = 1.0 / double(nFeature + 1);
double previousError = std::numeric_limits<double>::max();
double cumulativeError = 0;
......@@ -202,8 +191,7 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
//Y1 is the output vector of the hidden layer
//A2 is the value computed by the output neurons before applying the transfer function
//Y2 is the value of the output after the transfer function (softmax)
while (true)
{
while (true) {
oDeltaInputWeight.setZero();
oDeltaInputBias.setZero();
oDeltaHiddenWeight.setZero();
......@@ -212,28 +200,27 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
oY1.noalias() = ((m_inputWeight * oTrainingDataMatrix).colwise() + m_inputBias).unaryExpr(
std::ptr_fun<double, double>(static_cast<double(*)(double)>(tanh)));
oA2.noalias() = (m_hiddenWeight * oY1).colwise() + m_hiddenBias;
for (size_t i = 0; i < featureCount; ++i)
{
for (Eigen::Index i = 0; i < Eigen::Index(featureCount); ++i) {
const Eigen::VectorXd& oTarget = targetList[oTrainingSet[i]];
const Eigen::VectorXd& oData = oTrainingDataMatrix.col(i);
//Now we compute all deltas of the output layer
Eigen::VectorXd oOutputDelta = oA2.col(i) - oTarget;
for (size_t j = 0; j < nbClass; ++j)
{
for (size_t k = 0; k < hiddenNeuronCount; ++k) { oDeltaHiddenWeight(j, k) -= oOutputDelta[j] * oY1.col(i)[k]; }
for (Eigen::Index j = 0; j < nbClass; ++j) {
for (Eigen::Index k = 0; k < hiddenNeuronCount; ++k) { oDeltaHiddenWeight(j, k) -= oOutputDelta[j] * oY1.col(i)[k]; }
}
oDeltaHiddenBias.noalias() -= oOutputDelta;
//Now we take care of the hidden layer
Eigen::VectorXd oHiddenDelta = Eigen::VectorXd::Zero(hiddenNeuronCount);
for (size_t j = 0; j < hiddenNeuronCount; ++j)
{
for (size_t k = 0; k < nbClass; ++k) { oHiddenDelta[j] += oOutputDelta[k] * m_hiddenWeight(k, j); }
for (Eigen::Index j = 0; j < hiddenNeuronCount; ++j) {
for (Eigen::Index k = 0; k < nbClass; ++k) { oHiddenDelta[j] += oOutputDelta[k] * m_hiddenWeight(k, j); }
oHiddenDelta[j] *= (1 - pow(oY1.col(i)[j], 2));
}
for (size_t j = 0; j < hiddenNeuronCount; ++j) { for (size_t k = 0; k < nFeature; ++k) { oDeltaInputWeight(j, k) -= oHiddenDelta[j] * oData[k]; } }
for (Eigen::Index j = 0; j < hiddenNeuronCount; ++j) {
for (Eigen::Index k = 0; k < nFeature; ++k) { oDeltaInputWeight(j, k) -= oHiddenDelta[j] * oData[k]; }
}
oDeltaInputBias.noalias() -= oHiddenDelta;
}
//We finish the loop, let's apply deltas
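One thing this hunk leaves untouched is std::ptr_fun, which C++11 deprecated and C++17 removed; if the project moves past C++14, a lambda inside unaryExpr is the drop-in replacement for the element-wise tanh. A minimal sketch:

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main()
{
    Eigen::MatrixXd a(2, 2);
    a << -3.0, -0.5,
          0.5,  3.0;
    // Element-wise tanh without std::ptr_fun (which no longer exists in C++17):
    const Eigen::MatrixXd y = a.unaryExpr([](const double v) { return std::tanh(v); });
    std::cout << y << "\n";
    return 0;
}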
......@@ -252,15 +239,14 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
//We don't compute Y2 because we train on the identity
oA2.noalias() = (m_hiddenWeight * ((m_inputWeight * oValidationDataMatrix).colwise() + m_inputBias).unaryExpr(std::ptr_fun<double, double>(tanh))).
colwise() + m_hiddenBias;
for (size_t i = 0; i < oValidationSet.size(); ++i)
{
for (Eigen::Index i = 0; i < Eigen::Index(oValidationSet.size()); ++i) {
const Eigen::VectorXd& oTarget = targetList[oValidationSet[i]];
const Eigen::VectorXd& oIdentityResult = oA2.col(i);
//Now we need to compute the error
for (size_t j = 0; j < nbClass; ++j) { cumulativeError += 0.5 * pow(oIdentityResult[j] - oTarget[j], 2); }
for (Eigen::Index j = 0; j < nbClass; ++j) { cumulativeError += 0.5 * pow(oIdentityResult[j] - oTarget[j], 2); }
}
cumulativeError /= oValidationSet.size();
cumulativeError /= double(oValidationSet.size());
//If the decrease in error is under epsilon, we consider that the training is over
if (previousError - cumulativeError < epsilon) { break; }
previousError = cumulativeError;
......@@ -274,16 +260,16 @@ bool CAlgorithmClassifierMLP::train(const Toolkit::IFeatureVectorSet& dataset)
bool CAlgorithmClassifierMLP::classify(const Toolkit::IFeatureVector& sample, double& classLabel, Toolkit::IVector& distance, Toolkit::IVector& probability)
{
if (sample.getSize() != size_t(m_inputWeight.cols()))
{
this->getLogManager() << Kernel::LogLevel_Error << "Classifier expected " << size_t(m_inputWeight.cols()) << " features, got " << sample.getSize() << "\n";
if (sample.getSize() != size_t(m_inputWeight.cols())) {
this->getLogManager() << Kernel::LogLevel_Error << "Classifier expected " << size_t(m_inputWeight.cols()) << " features, got " << sample.getSize()
<< "\n";
return false;
}
const Eigen::Map<Eigen::VectorXd> oFeatureVec(const_cast<double*>(sample.getBuffer()), sample.getSize());
const Eigen::Map<Eigen::VectorXd> oFeatureVec(const_cast<double*>(sample.getBuffer()), Eigen::Index(sample.getSize()));
Eigen::VectorXd oData = oFeatureVec;
//we normalize and center data on 0 to avoid saturation
for (size_t j = 0; j < sample.getSize(); ++j) { oData[j] = 2 * (oData[j] - m_min) / (m_max - m_min) - 1; }
for (Eigen::Index j = 0; j < Eigen::Index(sample.getSize()); ++j) { oData[j] = 2 * (oData[j] - m_min) / (m_max - m_min) - 1; }
const size_t classCount = m_labels.size();
......@@ -301,10 +287,8 @@ bool CAlgorithmClassifierMLP::classify(const Toolkit::IFeatureVector& sample, do
size_t classFound = 0;
distance[0] = oA2[0];
probability[0] = oY2[0];
for (size_t i = 1; i < classCount; ++i)
{
if (oY2[i] > max)
{
for (Eigen::Index i = 1; i < Eigen::Index(classCount); ++i) {
if (oY2[i] > max) {
max = oY2[i];
classFound = i;
}
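The probability readout above is the softmax transfer function named in the earlier comments, y_i = exp(a_i) / sum_j exp(a_j), followed by an argmax over the classes. A standalone sketch; the max subtraction is the usual overflow guard and an assumption here, since the diff does not show how oY2 is computed from oA2:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::VectorXd a(3);
    a << 1.0, 2.0, 0.5;                                // output-layer activations (the A2 role)
    // Softmax: subtracting the max first keeps exp() from overflowing.
    const Eigen::ArrayXd e = (a.array() - a.maxCoeff()).exp();
    const Eigen::VectorXd y = (e / e.sum()).matrix();  // probabilities summing to 1 (the Y2 role)
    Eigen::Index best = 0;
    y.maxCoeff(&best);                                 // argmax, as in the loop above
    std::cout << y.transpose() << " -> class index " << best << "\n";
    return 0;
}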
......@@ -374,7 +358,7 @@ bool CAlgorithmClassifierMLP::loadConfig(XML::IXMLNode* configNode)
while (data >> temp) { m_labels.push_back(temp); }
int64_t featureSize, hiddenNeuronCount;
XML::IXMLNode* neuronConfigNode = configNode->getChildByName(MLP_NEURON_CONFIG_NODE_NAME);
const XML::IXMLNode* neuronConfigNode = configNode->getChildByName(MLP_NEURON_CONFIG_NODE_NAME);
loadData(neuronConfigNode->getChildByName(MLP_HIDDEN_NEURON_COUNT_NODE_NAME), hiddenNeuronCount);
loadData(neuronConfigNode->getChildByName(MLP_INPUT_NEURON_COUNT_NODE_NAME), featureSize);
......@@ -395,7 +379,7 @@ void CAlgorithmClassifierMLP::dumpData(XML::IXMLNode* node, Eigen::MatrixXd& mat
std::stringstream data;
data << std::scientific;
for (size_t i = 0; i < size_t(matrix.rows()); ++i) { for (size_t j = 0; j < size_t(matrix.cols()); ++j) { data << " " << matrix(i, j); } }
for (Eigen::Index i = 0; i < matrix.rows(); ++i) { for (Eigen::Index j = 0; j < matrix.cols(); ++j) { data << " " << matrix(i, j); } }
node->setPCData(data.str().c_str());
}
......@@ -405,7 +389,7 @@ void CAlgorithmClassifierMLP::dumpData(XML::IXMLNode* node, Eigen::VectorXd& vec
std::stringstream data;
data << std::scientific;
for (size_t i = 0; i < size_t(vector.size()); ++i) { data << " " << vector[i]; }
for (Eigen::Index i = 0; i < vector.size(); ++i) { data << " " << vector[i]; }
node->setPCData(data.str().c_str());
}
......@@ -425,7 +409,7 @@ void CAlgorithmClassifierMLP::dumpData(XML::IXMLNode* node, const double value)
node->setPCData(data.str().c_str());
}
void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, Eigen::MatrixXd& matrix, const size_t nRow, const size_t nCol)
void CAlgorithmClassifierMLP::loadData(const XML::IXMLNode* node, Eigen::MatrixXd& matrix, const size_t nRow, const size_t nCol)
{
matrix = Eigen::MatrixXd(nRow, nCol);
std::stringstream data(node->getPCData());
......@@ -435,17 +419,15 @@ void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, Eigen::MatrixXd& mat
while (data >> value) { coefs.push_back(value); }
size_t index = 0;
for (size_t i = 0; i < nRow; ++i)
{
for (size_t j = 0; j < nCol; ++j)
{
for (size_t i = 0; i < nRow; ++i) {
for (size_t j = 0; j < nCol; ++j) {
matrix(int(i), int(j)) = coefs[index];
++index;
}
}
}
void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, Eigen::VectorXd& vector)
void CAlgorithmClassifierMLP::loadData(const XML::IXMLNode* node, Eigen::VectorXd& vector)
{
std::stringstream data(node->getPCData());
std::vector<double> coefs;
......@@ -453,16 +435,16 @@ void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, Eigen::VectorXd& vec
while (data >> value) { coefs.push_back(value); }
vector = Eigen::VectorXd(coefs.size());
for (size_t i = 0; i < coefs.size(); ++i) { vector[i] = coefs[i]; }
for (Eigen::Index i = 0; i < Eigen::Index(coefs.size()); ++i) { vector[i] = coefs[i]; }
}
void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, int64_t& value)
void CAlgorithmClassifierMLP::loadData(const XML::IXMLNode* node, int64_t& value)
{
std::stringstream data(node->getPCData());
data >> value;
}
void CAlgorithmClassifierMLP::loadData(XML::IXMLNode* node, double& value)
void CAlgorithmClassifierMLP::loadData(const XML::IXMLNode* node, double& value)
{
std::stringstream data(node->getPCData());
data >> value;
......
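Throughout this file, dumpData and loadData treat the XML node's PCDATA as plain space-separated numbers: dumpData streams every coefficient with std::scientific, and loadData reads them back with operator>>. A self-contained sketch of that round trip, with the XML layer left out:

#include <iostream>
#include <sstream>
#include <vector>

int main()
{
    // Dump: space-separated, scientific notation, as in dumpData.
    std::stringstream out;
    out << std::scientific;
    for (const double v : { 0.5, -1.25, 3e-6 }) { out << " " << v; }

    // Load: operator>> skips the separating whitespace, as in loadData.
    std::stringstream in(out.str());
    std::vector<double> coefs;
    double value;
    while (in >> value) { coefs.push_back(value); }
    std::cout << coefs.size() << " values restored\n";  // prints "3 values restored"
    return 0;
}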
......@@ -46,10 +46,10 @@ private:
static void dumpData(XML::IXMLNode* node, int64_t value);
static void dumpData(XML::IXMLNode* node, double value);
static void loadData(XML::IXMLNode* node, Eigen::MatrixXd& matrix, size_t nRow, size_t nCol);
static void loadData(XML::IXMLNode* node, Eigen::VectorXd& vector);
static void loadData(XML::IXMLNode* node, int64_t& value);
static void loadData(XML::IXMLNode* node, double& value);
static void loadData(const XML::IXMLNode* node, Eigen::MatrixXd& matrix, size_t nRow, size_t nCol);
static void loadData(const XML::IXMLNode* node, Eigen::VectorXd& vector);
static void loadData(const XML::IXMLNode* node, int64_t& value);
static void loadData(const XML::IXMLNode* node, double& value);
std::vector<double> m_labels;
......
......@@ -6,6 +6,7 @@
#include <iostream>
#include <cstring>
#include <cmath>
#include <cfloat> // DBL_EPSILON
namespace OpenViBE {
namespace Plugins {
......@@ -86,8 +87,7 @@ bool CAlgorithmClassifierSVM::initialize()
bool CAlgorithmClassifierSVM::uninitialize()
{
if (m_prob.x != nullptr && m_prob.y != nullptr)
{
if (m_prob.x != nullptr && m_prob.y != nullptr) {
for (size_t i = 0; i < size_t(m_prob.l); ++i) { delete[] m_prob.x[i]; }
delete[] m_prob.y;
delete[] m_prob.x;
......@@ -95,14 +95,12 @@ bool CAlgorithmClassifierSVM::uninitialize()
m_prob.x = nullptr;
}
if (m_param.weight != nullptr)
{
if (m_param.weight != nullptr) {
delete[] m_param.weight;
m_param.weight = nullptr;
}
if (m_param.weight_label != nullptr)
{
if (m_param.weight_label != nullptr) {
delete[] m_param.weight_label;
m_param.weight_label = nullptr;
}
......@@ -116,8 +114,7 @@ bool CAlgorithmClassifierSVM::uninitialize()
void CAlgorithmClassifierSVM::deleteModel(svm_model* model, const bool freeSupportVectors)
{
if (model != nullptr)
{
if (model != nullptr) {
delete[] model->rho;
delete[] model->probA;
delete[] model->probB;
......@@ -165,7 +162,7 @@ void CAlgorithmClassifierSVM::setParameter()
double value;
while (ssWeight >> value) { weights.push_back(value); }
m_param.nr_weight = weights.size();
m_param.nr_weight = int(weights.size());
double* weight = new double[weights.size()];
for (uint32_t i = 0; i < weights.size(); ++i) { weight[i] = weights[i]; }
m_param.weight = weight;//nullptr;
......@@ -176,7 +173,7 @@ void CAlgorithmClassifierSVM::setParameter()
while (ssLabel >> iValue) { labels.push_back(iValue); }
//the number of weight labels needs to be equal to the number of weights
while (labels.size() < weights.size()) { labels.push_back(labels.size() + 1); }
while (labels.size() < weights.size()) { labels.push_back(int64_t(labels.size() + 1)); }
int* label = new int[weights.size()];
for (size_t i = 0; i < weights.size(); ++i) { label[i] = int(labels[i]); }
......@@ -185,8 +182,7 @@ void CAlgorithmClassifierSVM::setParameter()
bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
{
if (m_prob.x != nullptr && m_prob.y != nullptr)
{
if (m_prob.x != nullptr && m_prob.y != nullptr) {
for (size_t i = 0; i < size_t(m_prob.l); ++i) { delete[] m_prob.x[i]; }
delete[] m_prob.y;
delete[] m_prob.x;
......@@ -200,7 +196,7 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
//configure m_prob
//std::cout<<"prob config"<<std::endl;
m_prob.l = dataset.getFeatureVectorCount();
m_prob.l = int(dataset.getFeatureVectorCount());
m_nFeatures = dataset[0].getSize();
m_prob.y = new double[m_prob.l];
......@@ -208,12 +204,10 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
//std::cout<< "number vector:"<<l_oProb.l<<" size of vector:"<<m_nFeatures<<std::endl;
for (size_t i = 0; i < size_t(m_prob.l); ++i)
{
for (size_t i = 0; i < size_t(m_prob.l); ++i) {
m_prob.x[i] = new svm_node[m_nFeatures + 1];
m_prob.y[i] = dataset[i].getLabel();
for (size_t j = 0; j < m_nFeatures; ++j)
{
for (size_t j = 0; j < m_nFeatures; ++j) {
m_prob.x[i][j].index = int(j + 1);
m_prob.x[i][j].value = dataset[i].getBuffer()[j];
}
......@@ -221,19 +215,15 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
}
// Gamma of zero is interpreted as a request for automatic selection
if (m_param.gamma == 0) { m_param.gamma = 1.0 / (m_nFeatures > 0 ? m_nFeatures : 1.0); }
if (std::fabs(m_param.gamma) <= DBL_EPSILON) { m_param.gamma = 1.0 / (m_nFeatures > 0 ? double(m_nFeatures) : 1.0); }
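This gamma test is what the new <cfloat> include at the top of the file is for: direct floating-point equality such as gamma == 0 trips Clang's -Wfloat-equal, so the code now asks whether gamma is within DBL_EPSILON of zero, which still catches the 0.0 sentinel. The pattern in isolation:

#include <cfloat>  // DBL_EPSILON
#include <cmath>
#include <iostream>

// Near-zero test used instead of a direct == 0.0 comparison.
bool isSentinelZero(const double x) { return std::fabs(x) <= DBL_EPSILON; }

int main()
{
    std::cout << isSentinelZero(0.0) << " "      // 1: the exact sentinel
              << isSentinelZero(1e-300) << " "   // 1: tiny value, still treated as zero
              << isSentinelZero(0.5) << "\n";    // 0: a genuine gamma
    return 0;
}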
if (m_param.kernel_type == PRECOMPUTED)
{
for (size_t i = 0; i < size_t(m_prob.l); ++i)
{
if (m_prob.x[i][0].index != 0)
{
if (m_param.kernel_type == PRECOMPUTED) {
for (size_t i = 0; i < size_t(m_prob.l); ++i) {
if (m_prob.x[i][0].index != 0) {
this->getLogManager() << Kernel::LogLevel_Error << "Wrong input format: first column must be 0:sample_serial_number\n";
return false;
}
if (m_prob.x[i][0].value <= 0 || m_prob.x[i][0].value > m_nFeatures)
{
if (m_prob.x[i][0].value <= 0 || m_prob.x[i][0].value > double(m_nFeatures)) {
this->getLogManager() << Kernel::LogLevel_Error << "Wrong input format: sample_serial_number out of range\n";
return false;
}
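These PRECOMPUTED checks lean on libsvm's sparse sample layout: each sample is an array of svm_node (index, value) pairs with 1-based indices, terminated by a node whose index is -1; the saveConfig loop further down walks the same convention with p->index != -1. A standalone sketch using a local mirror of the struct:

#include <iostream>

// Local mirror of libsvm's svm_node: (index, value) pairs, 1-based indices,
// terminated by a node with index == -1.
struct svm_node { int index; double value; };

int main()
{
    const svm_node x[] = { { 1, 0.5 }, { 2, -1.0 }, { 4, 2.5 }, { -1, 0.0 } };  // feature 3 omitted: sparse
    for (const svm_node* p = x; p->index != -1; ++p) {  // same walk as in saveConfig
        std::cout << p->index << ":" << p->value << " ";
    }
    std::cout << "\n";  // prints "1:0.5 2:-1 4:2.5"
    return 0;
}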
......@@ -244,8 +234,7 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
//make a model
//std::cout<<"svm_train"<<std::endl;
if (m_model != nullptr)
{
if (m_model != nullptr) {
//std::cout<<"delete model"<<std::endl;
deleteModel(m_model, !m_modelWasTrained);
m_model = nullptr;
......@@ -253,8 +242,7 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
}
m_model = svm_train(&m_prob, &m_param);
if (m_model == nullptr)
{
if (m_model == nullptr) {
this->getLogManager() << Kernel::LogLevel_Error << "the training with SVM had failed\n";
return false;
}
......@@ -270,33 +258,28 @@ bool CAlgorithmClassifierSVM::train(const Toolkit::IFeatureVectorSet& dataset)
bool CAlgorithmClassifierSVM::classify(const Toolkit::IFeatureVector& sample, double& classLabel, Toolkit::IVector& distance, Toolkit::IVector& probability)
{
//std::cout<<"classify"<<std::endl;
if (m_model == nullptr)
{
if (m_model == nullptr) {
this->getLogManager() << Kernel::LogLevel_Error << "Classification is impossible with a model equalling nullptr\n";
return false;
}
if (m_model->nr_class == 0 || m_model->rho == nullptr)
{
if (m_model->nr_class == 0 || m_model->rho == nullptr) {
this->getLogManager() << Kernel::LogLevel_Error << "The model wasn't loaded correctly\n";
return false;
}
if (m_nFeatures != sample.getSize())
{
if (m_nFeatures != sample.getSize()) {
this->getLogManager() << Kernel::LogLevel_Error << "Classifier expected " << m_nFeatures << " features, got " << sample.getSize() << "\n";
return false;
}
if (m_model->param.gamma == 0 &&
(m_model->param.kernel_type == POLY || m_model->param.kernel_type == RBF || m_model->param.kernel_type == SIGMOID))
{
m_model->param.gamma = 1.0 / (m_nFeatures > 0 ? m_nFeatures : 1.0);
if (std::fabs(m_model->param.gamma) <= DBL_EPSILON &&
(m_model->param.kernel_type == POLY || m_model->param.kernel_type == RBF || m_model->param.kernel_type == SIGMOID)) {
m_model->param.gamma = 1.0 / (m_nFeatures > 0 ? double(m_nFeatures) : 1.0);
this->getLogManager() << Kernel::LogLevel_Warning << "The SVM model had gamma=0. Setting it to [" << m_model->param.gamma << "].\n";
}
//std::cout<<"create X"<<std::endl;
svm_node* x = new svm_node[sample.getSize() + 1];
//std::cout<<"featureVector.getSize():"<<featureVector.getSize()<<"m_numberOfFeatures"<<m_numberOfFeatures<<std::endl;
for (uint32_t i = 0; i < sample.getSize(); ++i)
{
for (uint32_t i = 0; i < sample.getSize(); ++i) {
x[i].index = int(i + 1);
x[i].value = sample.getBuffer()[i];
//std::cout<< X[i].index << ";"<<X[i].value<<" ";
......@@ -313,13 +296,11 @@ bool CAlgorithmClassifierSVM::classify(const Toolkit::IFeatureVector& sample, do
//std::cout<<"probability"<<std::endl;
//If we are not in these modes, label is nullptr and there is no probability
if (m_model->param.svm_type == C_SVC || m_model->param.svm_type == NU_SVC)
{
if (m_model->param.svm_type == C_SVC || m_model->param.svm_type == NU_SVC) {
probability.setSize(m_model->nr_class);
this->getLogManager() << Kernel::LogLevel_Trace << "Label predict: " << classLabel << "\n";
for (size_t i = 0; i < size_t(m_model->nr_class); ++i)
{
for (size_t i = 0; i < size_t(m_model->nr_class); ++i) {
this->getLogManager() << Kernel::LogLevel_Trace << "index:" << i << " label:" << m_model->label[i] << " probability:" << probEstimates[i] << "\n";
probability[(m_model->label[i])] = probEstimates[i];
}
......@@ -353,8 +334,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ssRho << " " << m_model->rho[i]; }
//std::cout<<"model save: sv_coef and SV"<<std::endl;
for (size_t i = 0; i < size_t(m_model->l); ++i)
{
for (size_t i = 0; i < size_t(m_model->l); ++i) {
std::stringstream ssCoef;
std::stringstream ssValue;
......@@ -364,15 +344,12 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
const svm_node* p = m_model->SV[i];
if (m_model->param.kernel_type == PRECOMPUTED) { ssValue << "0:" << double(p->value); }
else
{
if (p->index != -1)
{
else {
if (p->index != -1) {
ssValue << p->index << ":" << p->value;
p++;
}
while (p->index != -1)
{
while (p->index != -1) {
ssValue << " " << p->index << ":" << p->value;
p++;
}
......@@ -393,8 +370,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(get_kernel_type(m_model->param.kernel_type));
paramNode->addChild(tempNode);
if (m_model->param.kernel_type == POLY)
{
if (m_model->param.kernel_type == POLY) {
std::stringstream ss;
ss << m_model->param.degree;
......@@ -402,8 +378,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ss.str().c_str());
paramNode->addChild(tempNode);
}
if (m_model->param.kernel_type == POLY || m_model->param.kernel_type == RBF || m_model->param.kernel_type == SIGMOID)
{
if (m_model->param.kernel_type == POLY || m_model->param.kernel_type == RBF || m_model->param.kernel_type == SIGMOID) {
std::stringstream ss;
ss << m_model->param.gamma;
......@@ -411,8 +386,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ss.str().c_str());
paramNode->addChild(tempNode);
}
if (m_model->param.kernel_type == POLY || m_model->param.kernel_type == SIGMOID)
{
if (m_model->param.kernel_type == POLY || m_model->param.kernel_type == SIGMOID) {
std::stringstream ss;
ss << m_model->param.coef0;
......@@ -442,8 +416,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ssRho.str().c_str());
modelNode->addChild(tempNode);
if (m_model->label != nullptr)
{
if (m_model->label != nullptr) {
std::stringstream ss;
ss << m_model->label[0];
for (size_t i = 1; i < size_t(m_model->nr_class); ++i) { ss << " " << m_model->label[i]; }
......@@ -452,8 +425,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ss.str().c_str());
modelNode->addChild(tempNode);
}
if (m_model->probA != nullptr)
{
if (m_model->probA != nullptr) {
std::stringstream ss;
ss << std::scientific << m_model->probA[0];
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss << " " << m_model->probA[i]; }
......@@ -462,8 +434,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ss.str().c_str());
modelNode->addChild(tempNode);
}
if (m_model->probB != nullptr)
{
if (m_model->probB != nullptr) {
std::stringstream ss;
ss << std::scientific << m_model->probB[0];
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss << " " << m_model->probB[i]; }
......@@ -472,8 +443,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
tempNode->setPCData(ss.str().c_str());
modelNode->addChild(tempNode);
}
if (m_model->nSV != nullptr)
{
if (m_model->nSV != nullptr) {
std::stringstream ss;
ss << m_model->nSV[0];
for (size_t i = 1; i < size_t(m_model->nr_class); ++i) { ss << " " << m_model->nSV[i]; }
......@@ -485,8 +455,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
XML::IXMLNode* svsNode = XML::createNode(SVS_NODE_NAME);
{
for (size_t i = 0; i < size_t(m_model->l); ++i)
{
for (size_t i = 0; i < size_t(m_model->l); ++i) {
XML::IXMLNode* svNode = XML::createNode(SV_NODE_NAME);
{
tempNode = XML::createNode(COEF_NODE_NAME);
......@@ -508,8 +477,7 @@ XML::IXMLNode* CAlgorithmClassifierSVM::saveConfig()
bool CAlgorithmClassifierSVM::loadConfig(XML::IXMLNode* configNode)
{
if (m_model != nullptr)
{
if (m_model != nullptr) {
//std::cout<<"delete m_model load config"<<std::endl;
deleteModel(m_model, !m_modelWasTrained);
m_model = nullptr;
......@@ -535,17 +503,15 @@ void CAlgorithmClassifierSVM::loadParamNodeConfiguration(XML::IXMLNode* paramNod
{
//svm_type
XML::IXMLNode* tempNode = paramNode->getChildByName(SVM_TYPE_NODE_NAME);
for (size_t i = 0; get_svm_type(i) != nullptr; ++i) { if (strcmp(get_svm_type(i), tempNode->getPCData()) == 0) { m_model->param.svm_type = i; } }
if (get_svm_type(m_model->param.svm_type) == nullptr)
{
for (int i = 0; get_svm_type(i) != nullptr; ++i) { if (strcmp(get_svm_type(i), tempNode->getPCData()) == 0) { m_model->param.svm_type = i; } }
if (get_svm_type(m_model->param.svm_type) == nullptr) {
this->getLogManager() << Kernel::LogLevel_Error << "load configuration error: bad value for the parameter svm_type\n";
}
//kernel_type
tempNode = paramNode->getChildByName(KERNEL_TYPE_NODE_NAME);
for (size_t i = 0; get_kernel_type(i) != nullptr; ++i) { if (strcmp(get_kernel_type(i), tempNode->getPCData()) == 0) { m_model->param.kernel_type = i; } }
if (get_kernel_type(m_model->param.kernel_type) == nullptr)
{
for (int i = 0; get_kernel_type(i) != nullptr; ++i) { if (strcmp(get_kernel_type(i), tempNode->getPCData()) == 0) { m_model->param.kernel_type = i; } }
if (get_kernel_type(m_model->param.kernel_type) == nullptr) {
this->getLogManager() << Kernel::LogLevel_Error << "load configuration error: bad value for the parameter kernel_type\n";
}
......@@ -553,24 +519,21 @@ void CAlgorithmClassifierSVM::loadParamNodeConfiguration(XML::IXMLNode* paramNod
//degree
tempNode = paramNode->getChildByName(DEGREE_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
ss >> m_model->param.degree;
}
//gamma
tempNode = paramNode->getChildByName(GAMMA_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
ss >> m_model->param.gamma;
}
//coef0
tempNode = paramNode->getChildByName(COEF0_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
ss >> m_model->param.coef0;
}
......@@ -594,32 +557,28 @@ void CAlgorithmClassifierSVM::loadModelNodeConfiguration(XML::IXMLNode* modelNod
//label
tempNode = modelNode->getChildByName(LABEL_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
m_model->label = new int[m_model->nr_class];
for (size_t i = 0; i < size_t(m_model->nr_class); ++i) { ss >> m_model->label[i]; }
}
//probA
tempNode = modelNode->getChildByName(PROB_A_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
m_model->probA = new double[m_model->nr_class * (m_model->nr_class - 1) / 2];
for (size_t i = 0; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss >> m_model->probA[i]; }
}
//probB
tempNode = modelNode->getChildByName(PROB_B_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
m_model->probB = new double[m_model->nr_class * (m_model->nr_class - 1) / 2];
for (size_t i = 0; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss >> m_model->probB[i]; }
}
//nr_sv
tempNode = modelNode->getChildByName(NR_SV_NODE_NAME);
if (tempNode != nullptr)
{
if (tempNode != nullptr) {
std::stringstream ss(tempNode->getPCData());
m_model->nSV = new int[m_model->nr_class];
for (size_t i = 0; i < size_t(m_model->nr_class); ++i) { ss >> m_model->nSV[i]; }
......@@ -628,7 +587,7 @@ void CAlgorithmClassifierSVM::loadModelNodeConfiguration(XML::IXMLNode* modelNod
loadModelSVsNodeConfiguration(modelNode->getChildByName(SVS_NODE_NAME));
}
void CAlgorithmClassifierSVM::loadModelSVsNodeConfiguration(XML::IXMLNode* svsNodeParam)
void CAlgorithmClassifierSVM::loadModelSVsNodeConfiguration(const XML::IXMLNode* svsNodeParam)
{
//Reserve all memory space required
m_model->sv_coef = new double*[m_model->nr_class - 1];
......@@ -636,9 +595,8 @@ void CAlgorithmClassifierSVM::loadModelSVsNodeConfiguration(XML::IXMLNode* svsNo
m_model->SV = new svm_node*[m_model->l];
//Now fill SV
for (size_t i = 0; i < svsNodeParam->getChildCount(); ++i)
{
XML::IXMLNode* tempNode = svsNodeParam->getChild(i);
for (size_t i = 0; i < svsNodeParam->getChildCount(); ++i) {
const XML::IXMLNode* tempNode = svsNodeParam->getChild(i);
std::stringstream coefData(tempNode->getChildByName(COEF_NODE_NAME)->getPCData());
for (int j = 0; j < m_model->nr_class - 1; ++j) { coefData >> m_model->sv_coef[j][i]; }
......@@ -646,8 +604,7 @@ void CAlgorithmClassifierSVM::loadModelSVsNodeConfiguration(XML::IXMLNode* svsNo
std::vector<int> svmIdx;
std::vector<double> svmValue;
char separateChar;
while (!ss.eof())
{
while (!ss.eof()) {
int index;
double value;
ss >> index;
......@@ -659,8 +616,7 @@ void CAlgorithmClassifierSVM::loadModelSVsNodeConfiguration(XML::IXMLNode* svsNo
m_nFeatures = svmIdx.size();
m_model->SV[i] = new svm_node[svmIdx.size() + 1];
for (size_t j = 0; j < svmIdx.size(); ++j)
{
for (size_t j = 0; j < svmIdx.size(); ++j) {
m_model->SV[i][j].index = svmIdx[j];
m_model->SV[i][j].value = svmValue[j];
}
......@@ -707,36 +663,31 @@ CString CAlgorithmClassifierSVM::modelToString() const
ss << "\tnr_class: " << m_model->nr_class << "\n";
ss << "\ttotal_sv: " << m_model->l << "\n";
ss << "\trho: ";
if (m_model->rho != nullptr)
{
if (m_model->rho != nullptr) {
ss << m_model->rho[0];
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss << " " << m_model->rho[i]; }
}
ss << "\n";
ss << "\tlabel: ";
if (m_model->label != nullptr)
{
if (m_model->label != nullptr) {
ss << m_model->label[0];
for (size_t i = 1; i < size_t(m_model->nr_class); ++i) { ss << " " << m_model->label[i]; }
}
ss << "\n";
ss << "\tprobA: ";
if (m_model->probA != nullptr)
{
if (m_model->probA != nullptr) {
ss << m_model->probA[0];
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss << " " << m_model->probA[i]; }
}
ss << "\n";
ss << "\tprobB: ";
if (m_model->probB != nullptr)
{
if (m_model->probB != nullptr) {
ss << m_model->probB[0];
for (size_t i = 1; i < size_t(m_model->nr_class * (m_model->nr_class - 1) / 2); ++i) { ss << " " << m_model->probB[i]; }
}
ss << "\n";
ss << "\tnr_sv: ";
if (m_model->nSV != nullptr)
{
if (m_model->nSV != nullptr) {
ss << m_model->nSV[0];
for (size_t i = 1; i < size_t(m_model->nr_class); ++i) { ss << " " << m_model->nSV[i]; }
}
......
......@@ -77,14 +77,14 @@ protected:
private:
void loadParamNodeConfiguration(XML::IXMLNode* paramNode);
void loadModelNodeConfiguration(XML::IXMLNode* modelNode);
void loadModelSVsNodeConfiguration(XML::IXMLNode* svsNodeParam);
void loadModelSVsNodeConfiguration(const XML::IXMLNode* svsNodeParam);
void setParameter();
static void deleteModel(svm_model* model, bool freeSupportVectors);
};
class CAlgorithmClassifierSVMDesc : public Toolkit::CAlgorithmClassifierDesc
class CAlgorithmClassifierSVMDesc final : public Toolkit::CAlgorithmClassifierDesc
{
public:
void release() override { }
......
......@@ -39,8 +39,7 @@ bool CBoxAlgorithmOutlierRemoval::uninitialize()
m_sampleDecoder.uninitialize();
m_stimDecoder.uninitialize();
for (auto& data : m_datasets)
{
for (auto& data : m_datasets) {
delete data.sampleMatrix;
data.sampleMatrix = nullptr;
}
......@@ -62,8 +61,8 @@ bool CBoxAlgorithmOutlierRemoval::pruneSet(std::vector<feature_vector_t>& pruned
const size_t nSample = m_datasets.size(),
nFeatures = m_datasets[0].sampleMatrix->getDimensionSize(0),
lowerIdx = size_t(m_lowerQuantile * nSample),
upperIdx = size_t(m_upperQuantile * nSample);
lowerIdx = size_t(m_lowerQuantile * double(nSample)),
upperIdx = size_t(m_upperQuantile * double(nSample));
this->getLogManager() << Kernel::LogLevel_Trace << "Examined dataset is [" << nSample << "x" << nFeatures << "].\n";
......@@ -74,8 +73,7 @@ bool CBoxAlgorithmOutlierRemoval::pruneSet(std::vector<feature_vector_t>& pruned
std::vector<std::pair<double, size_t>> featureValues;
featureValues.resize(nSample);
for (size_t f = 0; f < nFeatures; ++f)
{
for (size_t f = 0; f < nFeatures; ++f) {
for (size_t i = 0; i < nSample; ++i) { featureValues[i] = std::pair<double, uint32_t>(m_datasets[i].sampleMatrix->getBuffer()[f], i); }
std::sort(featureValues.begin(), featureValues.end(), PairLess);
......@@ -98,10 +96,10 @@ bool CBoxAlgorithmOutlierRemoval::pruneSet(std::vector<feature_vector_t>& pruned
}
this->getLogManager() << Kernel::LogLevel_Trace << "Kept " << keptIdxs.size() << " examples in total ("
<< (100.0 * keptIdxs.size() / double(m_datasets.size())) << "% of " << m_datasets.size() << ")\n";
<< (100.0 * double(keptIdxs.size()) / double(m_datasets.size())) << "% of " << m_datasets.size() << ")\n";
pruned.clear();
for (size_t idx : keptIdxs) { pruned.push_back(m_datasets[idx]); }
for (const auto& idx : keptIdxs) { pruned.push_back(m_datasets[idx]); }
return true;
}
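pruneSet turns the quantile settings into positions in each sorted feature column: sort one feature across all samples, and lowerIdx/upperIdx bracket the values worth keeping. The acceptance logic between these hunks is elided, so the sketch below only reproduces the visible index computation; the 0.1 and 0.9 fractions are placeholders for whatever m_lowerQuantile and m_upperQuantile hold:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<double> feature = { 9.0, 1.0, 50.0, 3.0, 2.0, 4.0, -40.0, 5.0 };
    const double lowerQuantile = 0.1, upperQuantile = 0.9;  // placeholder settings

    std::sort(feature.begin(), feature.end());
    const std::size_t n = feature.size();
    const std::size_t lowerIdx = std::size_t(lowerQuantile * double(n));  // 0 for n = 8
    const std::size_t upperIdx = std::size_t(upperQuantile * double(n));  // 7 for n = 8

    // Values outside [feature[lowerIdx], feature[upperIdx - 1]] mark a sample as an outlier.
    std::cout << "keep values in [" << feature[lowerIdx] << ", " << feature[upperIdx - 1] << "]\n";
    return 0;
}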
......@@ -111,27 +109,21 @@ bool CBoxAlgorithmOutlierRemoval::process()
Kernel::IBoxIO& boxContext = this->getDynamicBoxContext();
// Stimulations
for (uint32_t i = 0; i < boxContext.getInputChunkCount(0); ++i)
{
for (uint32_t i = 0; i < boxContext.getInputChunkCount(0); ++i) {
m_stimDecoder.decode(i);
if (m_stimDecoder.isHeaderReceived())
{
if (m_stimDecoder.isHeaderReceived()) {
m_stimEncoder.encodeHeader();
boxContext.markOutputAsReadyToSend(0, boxContext.getInputChunkStartTime(0, i), boxContext.getInputChunkEndTime(0, i));
}
if (m_stimDecoder.isBufferReceived())
{
if (m_stimDecoder.isBufferReceived()) {
const CStimulationSet* stimSet = m_stimDecoder.getOutputStimulationSet();
for (uint32_t s = 0; s < stimSet->size(); ++s)
{
if (stimSet->getId(s) == m_trigger)
{
for (uint32_t s = 0; s < stimSet->size(); ++s) {
if (stimSet->getId(s) == m_trigger) {
std::vector<feature_vector_t> pruned;
if (!pruneSet(pruned)) { return false; }
// encode
for (auto& feature : pruned)
{
for (const auto& feature : pruned) {
m_sampleEncoder.getInputMatrix()->copy(*feature.sampleMatrix);
m_sampleEncoder.encodeBuffer();
boxContext.markOutputAsReadyToSend(1, feature.startTime, feature.endTime);
......@@ -144,8 +136,7 @@ bool CBoxAlgorithmOutlierRemoval::process()
m_stimEncoder.getInputStimulationSet()->clear();
if (m_triggerTime >= boxContext.getInputChunkStartTime(0, i) && m_triggerTime < boxContext.getInputChunkEndTime(0, i))
{
if (m_triggerTime >= boxContext.getInputChunkStartTime(0, i) && m_triggerTime < boxContext.getInputChunkEndTime(0, i)) {
m_stimEncoder.getInputStimulationSet()->push_back(m_trigger, m_triggerTime, 0);
m_triggerTime = -1LL;
}
......@@ -154,8 +145,7 @@ bool CBoxAlgorithmOutlierRemoval::process()
boxContext.markOutputAsReadyToSend(0, boxContext.getInputChunkStartTime(0, i), boxContext.getInputChunkEndTime(0, i));
}
if (m_stimDecoder.isEndReceived())
{
if (m_stimDecoder.isEndReceived()) {
m_stimEncoder.encodeEnd();
boxContext.markOutputAsReadyToSend(0, boxContext.getInputChunkStartTime(0, i), boxContext.getInputChunkEndTime(0, i));
......@@ -164,11 +154,9 @@ bool CBoxAlgorithmOutlierRemoval::process()
// Feature vectors
for (uint32_t i = 0; i < boxContext.getInputChunkCount(1); ++i)
{
for (uint32_t i = 0; i < boxContext.getInputChunkCount(1); ++i) {
m_sampleDecoder.decode(i);
if (m_sampleDecoder.isHeaderReceived())
{
if (m_sampleDecoder.isHeaderReceived()) {
m_sampleEncoder.getInputMatrix()->copyDescription(*m_sampleDecoder.getOutputMatrix());
m_sampleEncoder.encodeHeader();
......@@ -176,8 +164,7 @@ bool CBoxAlgorithmOutlierRemoval::process()
}
// add the decoded feature vector to the dataset
if (m_sampleDecoder.isBufferReceived())
{
if (m_sampleDecoder.isBufferReceived()) {
const CMatrix* pFeatureVectorMatrix = m_sampleDecoder.getOutputMatrix();
feature_vector_t tmp;
......@@ -189,8 +176,7 @@ bool CBoxAlgorithmOutlierRemoval::process()
m_datasets.push_back(tmp);
}
if (m_sampleDecoder.isEndReceived())
{
if (m_sampleDecoder.isEndReceived()) {
m_sampleEncoder.encodeEnd();
boxContext.markOutputAsReadyToSend(1, boxContext.getInputChunkStartTime(1, i), boxContext.getInputChunkEndTime(1, i));
......