Commit f81d4dd3 authored by hhakim

Delete the get_Fv_mul_mode method and make related changes, since 430188 disabled the alternative Faust-vector multiplication methods.
parent 33aff8f5
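With the alternative modes disabled, a Faust-vector product y = F1 * F2 * ... * Fn * x always goes through the default path; conceptually, such a product is evaluated factor by factor, so only factor-vector products are ever formed. The sketch below is a minimal, self-contained illustration of that evaluation with plain dense factors; DenseFactor and faust_vector_multiply are hypothetical names for this example, not the library's Transform<FPP,DEV>::multiply implementation.

#include <algorithm>
#include <vector>

// Hypothetical stand-in for one dense factor (row-major); the real library
// uses Faust::MatDense / Faust::MatSparse, which are not reproduced here.
struct DenseFactor
{
    int nrows, ncols;
    std::vector<double> data; // row-major, size nrows * ncols

    // y = this * x, with x of size ncols and y of size nrows.
    void apply(const double* x, double* y) const
    {
        for (int i = 0; i < nrows; ++i)
        {
            double acc = 0.0;
            for (int j = 0; j < ncols; ++j)
                acc += data[i * ncols + j] * x[j];
            y[i] = acc;
        }
    }
};

// y = F1 * F2 * ... * Fn * x, evaluated by propagating the vector through the
// factors starting from the one adjacent to x, so no matrix-matrix product is
// ever formed. y must be caller-allocated (size = factors.front().nrows).
void faust_vector_multiply(const std::vector<DenseFactor>& factors,
                           const double* x, double* y)
{
    std::vector<double> cur(x, x + factors.back().ncols);
    for (auto it = factors.rbegin(); it != factors.rend(); ++it)
    {
        std::vector<double> next(it->nrows);
        it->apply(cur.data(), next.data());
        cur.swap(next);
    }
    std::copy(cur.begin(), cur.end(), y); // write into the caller's buffer
}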
@@ -84,7 +84,6 @@ namespace Faust
virtual void copy_mul_mode_state(const TransformHelper<FPP,DEV>& th);
void copy_state(const TransformHelper<FPP,DEV>& th);
int get_mul_order_opt_mode() const;
- int get_Fv_mul_mode() const;
void eval_sliced_Transform();
void eval_fancy_idx_Transform();
virtual TransformHelper<FPP, DEV>* slice(faust_unsigned_int start_row_id, faust_unsigned_int end_row_id,
@@ -108,8 +107,6 @@ namespace Faust
faust_unsigned_int fancy_num_cols;
std::shared_ptr<Transform<FPP,DEV>> transform;
int mul_order_opt_mode;
- int Fv_mul_mode;
};
}
#include "faust_TransformHelperGen.hpp"
......
@@ -2,7 +2,7 @@
namespace Faust
{
template<typename FPP, FDevice DEV>
- TransformHelperGen<FPP,DEV>::TransformHelperGen() : is_transposed(false), is_conjugate(false), is_sliced(false), is_fancy_indexed(false), transform(std::make_shared<Transform<FPP,DEV>>()), mul_order_opt_mode(0), Fv_mul_mode(0)
+ TransformHelperGen<FPP,DEV>::TransformHelperGen() : is_transposed(false), is_conjugate(false), is_sliced(false), is_fancy_indexed(false), transform(std::make_shared<Transform<FPP,DEV>>()), mul_order_opt_mode(0)
{
}
@@ -249,7 +249,6 @@ namespace Faust
void TransformHelperGen<FPP,DEV>::copy_mul_mode_state(const TransformHelper<FPP,DEV>& th)
{
this->mul_order_opt_mode = th.mul_order_opt_mode;
- this->Fv_mul_mode = th.Fv_mul_mode;
}
template<typename FPP, FDevice DEV>
@@ -258,12 +257,6 @@ namespace Faust
return this->mul_order_opt_mode;
}
- template<typename FPP, FDevice DEV>
- int TransformHelperGen<FPP,DEV>::get_Fv_mul_mode() const
- {
- return this->Fv_mul_mode;
- }
template<typename FPP, FDevice DEV>
void TransformHelperGen<FPP, DEV>::eval_sliced_Transform()
{
......
@@ -218,9 +218,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
else if(!strcmp("set_FM_mul_mode", cmd))
// def in module faust_optimize
set_FM_mul_mode<SCALAR,DEV>(prhs, nrhs, plhs, nlhs);
else if(!strcmp("set_Fv_mul_mode", cmd))
// def in module faust_optimize
set_Fv_mul_mode<SCALAR,DEV>(prhs, nrhs, plhs, nlhs);
else if(!strcmp("fourier", cmd))
faust_fourier<SCALAR,DEV>(prhs, nrhs, plhs, nlhs);
else if(!strcmp("hadamard", cmd))
......
@@ -3050,16 +3050,16 @@ class FaustMulMode:
## \brief The method is basically the same as GREEDY_ALL_BEST_GENMAT but it is implemented using the Torch library.
##
## This method is only available for the specific packages pyfaust_torch.
TORCH_CPU_GREEDY=9
## \brief The same as TORCH_CPU_L2R except that torch::chain_matmul is used to
## compute in one call the intermediary product of dense contiguous
## factors, then the result is multiplied by sparse factors if any remains.
##
## torch::chain_matmul follows the dynamic programming principle as DYNPROG method does (but the former handles only dense matrices).
##
## References:
## https://pytorch.org/cppdocs/api/function_namespaceat_1aee491a9ff453b6033b4106516bc61a9d.html?highlight=chain_matmul
## https://pytorch.org/docs/stable/generated/torch.chain_matmul.html?highlight=chain_matmul#torch.chain_matmul
TORCH_CPU_GREEDY=9
## \brief The same as TORCH_CPU_L2R except that torch::chain_matmul is used to
## compute in one call the intermediary product of dense contiguous
## factors, then the result is multiplied by sparse factors if any remains.
##
## torch::chain_matmul follows the dynamic programming principle as DYNPROG method does (but the former handles only dense matrices).
##
## References:
## https://pytorch.org/cppdocs/api/function_namespaceat_1aee491a9ff453b6033b4106516bc61a9d.html?highlight=chain_matmul
## https://pytorch.org/docs/stable/generated/torch.chain_matmul.html?highlight=chain_matmul#torch.chain_matmul
## This method is only available for the specific packages pyfaust_torch.
TORCH_CPU_DENSE_DYNPROG_SPARSE_L2R=10
## This method is only available for the specific packages pyfaust_torch.
TORCH_CPU_DENSE_DYNPROG_SPARSE_L2R=10
@@ -28,21 +28,10 @@ void FaustCoreCpp<@TYPE@,Cpu>::multiply(@TYPE@* value_y,int nbrow_y,int nbcol_y,
}
if (nbcol_x == 1)
{
- if(this->transform->get_Fv_mul_mode() == Faust::DEFAULT_L2R)
- {
- // assuming that x and y are pointers to memory allocated to the proper
- // sizes
- // Y = this->transform->multiply(value_x);
- this->transform->multiply(value_x, value_y);
- }
- else
- { // for other ways to multiply, it is not handled yet
- Faust::Vect<@TYPE@,Cpu> X(nbrow_x,value_x);
- Faust::Vect<@TYPE@,Cpu> Y;
- Y = this->transform->multiply(X);
- memcpy(value_y,Y.getData(),sizeof(@TYPE@)*nbrow_y);
- }
+ // assuming that x and y are pointers to memory allocated to the proper
+ // sizes
+ // Y = this->transform->multiply(value_x);
+ this->transform->multiply(value_x, value_y);
}
else
{
......
@@ -29,21 +29,10 @@ void FaustCoreCpp<@TYPE@,GPU2>::multiply(@TYPE@* value_y,int nbrow_y,int nbcol_y
}
if (nbcol_x == 1)
{
- if(this->transform->get_Fv_mul_mode() == Faust::DEFAULT_L2R)
- {
- // assuming that x and y are pointers to memory allocated to the proper
- // sizes
- // Y = this->transform->multiply(value_x);
- this->transform->multiply(value_x, value_y);
- }
- else
- { // for other ways to multiply, it is not handled yet
- Faust::Vect<@TYPE@,Cpu> X(nbrow_x,value_x);
- Faust::Vect<@TYPE@,Cpu> Y;
- Y = this->transform->multiply(X);
- memcpy(value_y,Y.getData(),sizeof(@TYPE@)*nbrow_y);
- }
+ // assuming that x and y are pointers to memory allocated to the proper
+ // sizes
+ // Y = this->transform->multiply(value_x);
+ this->transform->multiply(value_x, value_y);
}
else
{
......
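In both the CPU and GPU2 wrappers the vector branch now forwards directly to Transform::multiply(value_x, value_y), writing into the caller-allocated buffer rather than building a Faust::Vect and copying it back with memcpy. A rough sketch of the difference between the two call styles follows; apply_chain, multiply_via_temporary and multiply_in_place are hypothetical names standing in for the real calls:

#include <cstring>
#include <vector>

// Hypothetical stand-in for the factor-chain-times-vector product that the
// wrappers delegate to the Transform object.
static void apply_chain(const double* x, double* y, int n)
{
    for (int i = 0; i < n; ++i)
        y[i] = 2.0 * x[i]; // placeholder computation
}

// Style of the removed branch: build a temporary result, then copy it out.
static void multiply_via_temporary(const double* x, double* y_out, int n)
{
    std::vector<double> tmp(n);
    apply_chain(x, tmp.data(), n);
    std::memcpy(y_out, tmp.data(), sizeof(double) * n); // extra copy
}

// Style kept by the wrappers: write directly into the caller's buffer.
static void multiply_in_place(const double* x, double* y_out, int n)
{
    apply_chain(x, y_out, n); // no temporary, no memcpy
}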