Commit 6f4f0e0a authored by BLANCHARD Pierre

Some more cleanup in MatrixKernel class (renaming DIM to NCMP, to avoid confusion in other applications) as well as in Chebyshev/Uniform Tensorial tests.
parent 6ed537c1
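
To make the scope of the rename concrete, here is a minimal sketch of client code that is generic over the number of kernel components (DummyTensorialKernel and printComponentCount are illustrative names, not ScalFMM declarations):

#include <cstdio>

// Hypothetical stand-in for a tensorial matrix kernel such as FInterpMatrixKernel_R_IJ.
struct DummyTensorialKernel {
    static const unsigned int NCMP = 6;  //< number of stored components (named DIM before this commit)
};

// Generic client code now reads the trait under its new name.
template <class MatrixKernelClass>
void printComponentCount() {
    const unsigned int ncmp = MatrixKernelClass::NCMP;  // was: MatrixKernelClass::DIM
    std::printf("kernel stores %u components\n", ncmp);
}

int main() { printComponentCount<DummyTensorialKernel>(); return 0; }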
......@@ -56,9 +56,9 @@ class FChebSymTensorialKernel
: public FAbstractChebKernel<CellClass, ContainerClass, MatrixKernelClass, ORDER, NVALS>
{
typedef FAbstractChebKernel<CellClass, ContainerClass, MatrixKernelClass, ORDER, NVALS> AbstractBaseClass;
typedef SymmetryHandler<ORDER, MatrixKernelClass::DIM, MatrixKernelClass::Type> SymmetryHandlerClass;
typedef SymmetryHandler<ORDER, MatrixKernelClass::NCMP, MatrixKernelClass::Type> SymmetryHandlerClass;
enum {nnodes = AbstractBaseClass::nnodes,
dim = MatrixKernelClass::DIM,
ncmp = MatrixKernelClass::NCMP,
nRhs = MatrixKernelClass::NRHS,
nLhs = MatrixKernelClass::NLHS};
......
......@@ -512,11 +512,11 @@ static void precompute(const int idxK, const FReal CellWidth,
because it allows us to associate the far-field interactions with
the index \f$t = 7^2(i+3) + 7(j+3) + (k+3)\f$ where \f$(i,j,k)\f$ denotes
the relative position of the source cell to the target cell. */
template <int ORDER, int DIM, KERNEL_FUNCTION_TYPE TYPE> class SymmetryHandler;
template <int ORDER, int NCMP, KERNEL_FUNCTION_TYPE TYPE> class SymmetryHandler;
/*! Specialization for homogeneous kernel functions */
template <int ORDER, int DIM>
class SymmetryHandler<ORDER, DIM, HOMOGENEOUS>
template <int ORDER, int NCMP>
class SymmetryHandler<ORDER, NCMP, HOMOGENEOUS>
{
static const unsigned int nnodes = ORDER*ORDER*ORDER;
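
For reference, the index convention described in the comment above can be sketched with two illustrative helpers (not part of the library): every relative source-to-target cell offset (i,j,k) in [-3,3]^3 maps to one of 343 = 7^3 slots, of which only the 316 = 7^3 - 3^3 far-field offsets actually hold an M2L operator.

#include <cstdlib>

// Slot of the relative offset (i,j,k): t = 7^2*(i+3) + 7*(j+3) + (k+3), in [0, 342].
unsigned int interactionIndex(int i, int j, int k) {
    return (i + 3) * 7 * 7 + (j + 3) * 7 + (k + 3);
}

// The 27 near-field offsets with |i|,|j|,|k| <= 1 are excluded from M2L
// (they are treated by direct P2P interactions).
bool isFarField(int i, int j, int k) {
    return std::abs(i) > 1 || std::abs(j) > 1 || std::abs(k) > 1;
}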
......@@ -539,10 +539,10 @@ public:
{
// For each component of kernel matrix
// initialize all 343 items to zero, because effectively only 16 exist
K = new FReal** [DIM];
LowRank = new int* [DIM];
K = new FReal** [NCMP];
LowRank = new int* [NCMP];
for (unsigned int d=0; d<DIM; ++d) {
for (unsigned int d=0; d<NCMP; ++d) {
K[d] = new FReal* [343];
LowRank[d] = new int [343];
......@@ -565,7 +565,7 @@ public:
// precompute 16 M2L operators
const FReal ReferenceCellWidth = FReal(2.);
for (unsigned int d=0; d<DIM; ++d)
for (unsigned int d=0; d<NCMP; ++d)
precompute<ORDER,MatrixKernelClass>(d, ReferenceCellWidth, Epsilon, K[d], LowRank[d]);
}
......@@ -575,7 +575,7 @@ public:
/** Destructor */
~SymmetryHandler()
{
for (unsigned int d=0; d<DIM; ++d)
for (unsigned int d=0; d<NCMP; ++d)
for (unsigned int t=0; t<343; ++t) if (K[d][t]!=NULL) delete [] K[d][t];
}
......@@ -598,8 +598,8 @@ public:
/*! Specialization for non-homogeneous kernel functions */
template <int ORDER, int DIM>
class SymmetryHandler<ORDER, DIM, NON_HOMOGENEOUS>
template <int ORDER, int NCMP>
class SymmetryHandler<ORDER, NCMP, NON_HOMOGENEOUS>
{
static const unsigned int nnodes = ORDER*ORDER*ORDER;
......@@ -625,9 +625,9 @@ public:
: TreeHeight(inTreeHeight)
{
// initialize all 343 items to zero, because effectively only 16 exist
K = new FReal*** [DIM];
LowRank = new int** [DIM];
for (unsigned int d=0; d<DIM; ++d) {
K = new FReal*** [NCMP];
LowRank = new int** [NCMP];
for (unsigned int d=0; d<NCMP; ++d) {
K[d] = new FReal** [TreeHeight];
LowRank[d] = new int* [TreeHeight];
K[d][0] = NULL; K[d][1] = NULL;
......@@ -654,7 +654,7 @@ public:
}
// precompute 16 M2L operators at all levels having far-field interactions
for (unsigned int d=0; d<DIM; ++d) {
for (unsigned int d=0; d<NCMP; ++d) {
FReal CellWidth = RootCellWidth / FReal(2.); // at level 1
CellWidth /= FReal(2.); // at level 2
for (unsigned int l=2; l<TreeHeight; ++l) {
......@@ -669,7 +669,7 @@ public:
/** Destructor */
~SymmetryHandler()
{
for (unsigned int d=0; d<DIM; ++d) {
for (unsigned int d=0; d<NCMP; ++d) {
for (unsigned int l=0; l<TreeHeight; ++l) {
if (K[d][l]!=NULL) {
for (unsigned int t=0; t<343; ++t) if (K[d][l][t]!=NULL) delete [] K[d][l][t];
......
......@@ -86,7 +86,7 @@ class FChebTensorialM2LHandler<ORDER,MatrixKernelClass,HOMOGENEOUS> : FNoCopyabl
enum {order = ORDER,
nnodes = TensorTraits<ORDER>::nnodes,
ninteractions = 316,// 7^3 - 3^3 (max num cells in far-field)
dim = MatrixKernelClass::DIM};
ncmp = MatrixKernelClass::NCMP};
// const MatrixKernelClass MatrixKernel;
......@@ -118,18 +118,18 @@ public:
if (U||B) throw std::runtime_error("U or B operator already set");
// allocate C
C = new FReal*[dim];
for (unsigned int d=0; d<dim; ++d)
C = new FReal*[ncmp];
for (unsigned int d=0; d<ncmp; ++d)
C[d]=NULL;
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (C[d]) throw std::runtime_error("Compressed M2L operator already set");
// Compute matrix of interactions
const FReal ReferenceCellWidth = FReal(2.);
rank = ComputeAndCompress<order>(MatrixKernel, ReferenceCellWidth, epsilon, U, C, B);
unsigned long sizeM2L = 343*dim*rank*rank*sizeof(FReal);
unsigned long sizeM2L = 343*ncmp*rank*rank*sizeof(FReal);
// write info
......@@ -141,7 +141,7 @@ public:
{
if (U != NULL) delete [] U;
if (B != NULL) delete [] B;
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (C[d] != NULL) delete [] C[d];
}
......@@ -204,7 +204,7 @@ class FChebTensorialM2LHandler<ORDER,MatrixKernelClass,NON_HOMOGENEOUS> : FNoCop
enum {order = ORDER,
nnodes = TensorTraits<ORDER>::nnodes,
ninteractions = 316,// 7^3 - 3^3 (max num cells in far-field)
dim = MatrixKernelClass::DIM};
ncmp = MatrixKernelClass::NCMP};
// Tensorial MatrixKernel and homogeneity specific
FReal **U, **B;
......@@ -249,14 +249,14 @@ public:
// allocate C
C = new FReal**[TreeHeight];
for (unsigned int l=0; l<TreeHeight; ++l){
C[l] = new FReal*[dim];
for (unsigned int d=0; d<dim; ++d)
C[l] = new FReal*[ncmp];
for (unsigned int d=0; d<ncmp; ++d)
C[l][d]=NULL;
}
for (unsigned int l=0; l<TreeHeight; ++l) {
if (U[l] || B[l]) throw std::runtime_error("Operator U or B already set");
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (C[l][d]) throw std::runtime_error("Compressed M2L operator already set");
}
......@@ -268,7 +268,7 @@ public:
rank[l] = ComputeAndCompress<order>(MatrixKernel, CellWidth, epsilon, U[l], C[l], B[l]);
CellWidth /= FReal(2.); // at level l+1
}
unsigned long sizeM2L = (TreeHeight-2)*343*dim*rank[2]*rank[2]*sizeof(FReal);
unsigned long sizeM2L = (TreeHeight-2)*343*ncmp*rank[2]*rank[2]*sizeof(FReal);
// write info
std::cout << "Compute and Set M2L operators of " << TreeHeight-2 << " levels ("<< long(sizeM2L/**1e-6*/) <<" Bytes) in "
......@@ -281,7 +281,7 @@ public:
for (unsigned int l=0; l<TreeHeight; ++l) {
if (U[l] != NULL) delete [] U[l];
if (B[l] != NULL) delete [] B[l];
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (C[l][d] != NULL) delete [] C[l][d];
}
}
......@@ -363,10 +363,10 @@ unsigned int ComputeAndCompress(const MatrixKernelClass *const MatrixKernel,
const unsigned int order = ORDER;
const unsigned int nnodes = TensorTraits<ORDER>::nnodes;
const unsigned int ninteractions = 316;
const unsigned int dim = MatrixKernelClass::DIM;
const unsigned int ncmp = MatrixKernelClass::NCMP;
// allocate memory and store compressed M2L operators
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (C[d]) throw std::runtime_error("Compressed M2L operators are already set");
// interpolation points of source (Y) and target (X) cell
......@@ -376,8 +376,8 @@ unsigned int ComputeAndCompress(const MatrixKernelClass *const MatrixKernel,
// allocate memory and compute 316 m2l operators
FReal** _C;
_C = new FReal* [dim];
for (unsigned int d=0; d<dim; ++d)
_C = new FReal* [ncmp];
for (unsigned int d=0; d<ncmp; ++d)
_C[d] = new FReal [nnodes*nnodes * ninteractions];
unsigned int counter = 0;
......@@ -396,11 +396,11 @@ unsigned int ComputeAndCompress(const MatrixKernelClass *const MatrixKernel,
// Compute current M2L interaction (block matrix)
FReal* block;
block = new FReal[dim];
block = new FReal[ncmp];
MatrixKernel->evaluateBlock(X[m], Y[n], block);
// Copy block in C
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
_C[d][counter*nnodes*nnodes + n*nnodes + m] = block[d];
delete [] block;
......@@ -444,24 +444,24 @@ unsigned int ComputeAndCompress(const MatrixKernelClass *const MatrixKernel,
// store C
counter = 0;
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
C[d] = new FReal [343 * rank*rank];
for (int i=-3; i<=3; ++i)
for (int j=-3; j<=3; ++j)
for (int k=-3; k<=3; ++k) {
const unsigned int idx = (i+3)*7*7 + (j+3)*7 + (k+3);
if (abs(i)>1 || abs(j)>1 || abs(k)>1) {
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
FBlas::copy(rank*rank, _C[d] + counter*rank*rank, C[d] + idx*rank*rank);
counter++;
} else {
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
FBlas::setzero(rank*rank, C[d] + idx*rank*rank);
}
}
if (counter != ninteractions)
throw std::runtime_error("Number of interactions must correspond to 316");
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
delete [] _C[d];
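
As an order-of-magnitude check of the storage allocated just above (343 slots of rank*rank entries per component, matching the sizeM2L printout earlier in this diff), with a hypothetical low rank of rank = 30, ncmp = 6 components and double precision:

\f[ 343 \cdot \mathrm{ncmp} \cdot \mathrm{rank}^2 \cdot \mathrm{sizeof(FReal)} = 343 \cdot 6 \cdot 30^2 \cdot 8\,\mathrm{B} = 14\,817\,600\,\mathrm{B} \approx 14.8\,\mathrm{MB}. \f]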
......
......@@ -44,8 +44,8 @@ enum KERNEL_FUNCTION_TYPE {HOMOGENEOUS, NON_HOMOGENEOUS};
* This class provides the evaluators and scaling functions of the matrix
* kernels. A matrix kernel should be understood in the sense of a kernel
* of interaction (or the fundamental solution of a given equation).
* It can either be scalar (DIM=1) or tensorial (DIM>1) depending on the
* dimension of the equation considered. DIM denotes the number of components
* It can either be scalar (NCMP=1) or tensorial (NCMP>1) depending on the
* dimension of the equation considered. NCMP denotes the number of components
* that are actually stored (e.g. 6 for a \f$3\times3\f$ symmetric tensor).
* Notes on application scheme:
* Let there be a kernel \f$K\f$ such that \f$X_i=K_{ij}Y_j\f$
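
To make the storage convention concrete: for a symmetric \f$3\times3\f$ kernel only the NCMP = 6 components \f$K_{00},K_{01},K_{02},K_{11},K_{12},K_{22}\f$ are stored, and the application \f$X_i=K_{ij}Y_j\f$ reuses them across the full matrix:

\f[ \begin{aligned}
X_0 &= K_{00}Y_0 + K_{01}Y_1 + K_{02}Y_2,\\
X_1 &= K_{01}Y_0 + K_{11}Y_1 + K_{12}Y_2, & (K_{10}=K_{01})\\
X_2 &= K_{02}Y_0 + K_{12}Y_1 + K_{22}Y_2. & (K_{20}=K_{02},\ K_{21}=K_{12})
\end{aligned} \f]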
......@@ -70,11 +70,11 @@ struct FInterpMatrixKernelR : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = /*NON_*/HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = ONE_OVER_R;
static const unsigned int DIM = 1; //PB: dimension of kernel
static const unsigned int NPV = 1;
static const unsigned int NPOT = 1;
static const unsigned int NRHS = 1;
static const unsigned int NLHS = 1;
static const unsigned int NCMP = 1; //< number of components
static const unsigned int NPV = 1; //< dim of physical values
static const unsigned int NPOT = 1; //< dim of potentials
static const unsigned int NRHS = 1; //< dim of mult exp
static const unsigned int NLHS = 1; //< dim of loc exp
FInterpMatrixKernelR(const double = 0.0, const unsigned int = 0) {}
......@@ -127,11 +127,11 @@ struct FInterpMatrixKernelRR : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = ONE_OVER_R_SQUARED;
static const unsigned int DIM = 1; //PB: dimension of kernel
static const unsigned int NPV = 1;
static const unsigned int NPOT = 1;
static const unsigned int NRHS = 1;
static const unsigned int NLHS = 1;
static const unsigned int NCMP = 1; //< number of components
static const unsigned int NPV = 1; //< dim of physical values
static const unsigned int NPOT = 1; //< dim of potentials
static const unsigned int NRHS = 1; //< dim of mult exp
static const unsigned int NLHS = 1; //< dim of loc exp
FInterpMatrixKernelRR(const double = 0.0, const unsigned int = 0) {}
......@@ -147,6 +147,11 @@ struct FInterpMatrixKernelRR : FInterpAbstractMatrixKernel
xy.getZ()*xy.getZ());
}
void evaluateBlock(const FPoint& x, const FPoint& y, FReal* block) const
{
block[0]=this->evaluate(x,y);
}
FReal getScaleFactor(const FReal RootCellWidth, const int TreeLevel) const
{
const FReal CellWidth(RootCellWidth / FReal(FMath::pow(2, TreeLevel)));
......@@ -166,11 +171,11 @@ struct FInterpMatrixKernelLJ : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = NON_HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = LENNARD_JONES_POTENTIAL;
static const unsigned int DIM = 1; //PB: dimension of kernel
static const unsigned int NPV = 1;
static const unsigned int NPOT = 1;
static const unsigned int NRHS = 1;
static const unsigned int NLHS = 1;
static const unsigned int NCMP = 1; //< number of components
static const unsigned int NPV = 1; //< dim of physical values
static const unsigned int NPOT = 1; //< dim of potentials
static const unsigned int NRHS = 1; //< dim of mult exp
static const unsigned int NLHS = 1; //< dim of loc exp
FInterpMatrixKernelLJ(const double = 0.0, const unsigned int = 0) {}
......@@ -189,6 +194,11 @@ struct FInterpMatrixKernelLJ : FInterpAbstractMatrixKernel
return one_over_r6 * one_over_r6 - one_over_r6;
}
void evaluateBlock(const FPoint& x, const FPoint& y, FReal* block) const
{
block[0]=this->evaluate(x,y);
}
FReal getScaleFactor(const FReal, const int) const
{
// return 1 because non homogeneous kernel functions cannot be scaled!!!
......@@ -206,7 +216,7 @@ struct FInterpMatrixKernelLJ : FInterpAbstractMatrixKernel
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Tensorial Matrix Kernels (DIM>1)
// Tensorial Matrix Kernels (NCMP>1)
//
// The definition of the potential p and force f are extended to the case
// of tensorial interaction kernels:
......@@ -236,15 +246,15 @@ struct FInterpMatrixKernel_IOR : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = /*NON_*/HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = ID_OVER_R;
static const unsigned int NK = 3*3; // dim of kernel
static const unsigned int DIM = 6; // reduced dim
static const unsigned int NPOT = 3; // dim of potentials
static const unsigned int NPV = 3; // dim of physical values
static const unsigned int NRHS = NPV; // dim of mult exp (here = dim physval)
static const unsigned int NLHS = NPOT*NRHS; // dim of loc exp (here = NPOT*NRHS)
static const unsigned int NK = 3*3; //< total number of components
static const unsigned int NCMP = 6; //< number of components after symmetry
static const unsigned int NPV = 3; //< dim of physical values
static const unsigned int NPOT = 3; //< dim of potentials
static const unsigned int NRHS = NPV; //< dim of mult exp
static const unsigned int NLHS = NPOT*NRHS; //< dim of loc exp
// store indices (i,j) corresponding to sym idx
static const unsigned int indexTab[/*2*DIM=12*/];
static const unsigned int indexTab[/*2*NCMP=12*/];
// store positions in sym tensor
static const unsigned int applyTab[/*9*/];
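
The actual contents of indexTab and applyTab are outside this excerpt; a plausible layout, consistent with NCMP = 6 and with the accesses indexTab[d] / indexTab[d + NCMP] in the constructor below, is sketched here for illustration only:

// Illustrative values (assumed, not taken from the library sources):
// component d stores entry (indexTab[d], indexTab[d + NCMP]) of the 3x3 tensor.
static const unsigned int indexTab[2 * 6] = {0, 0, 0, 1, 1, 2,   // i of component d
                                             0, 1, 2, 1, 2, 2};  // j of component d

// applyTab maps a full (i,j) position (row-major, 9 entries) back to the stored
// component, exploiting the symmetry K_ij = K_ji.
static const unsigned int applyTab[9] = {0, 1, 2,
                                         1, 3, 4,
                                         2, 4, 5};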
......@@ -252,7 +262,7 @@ struct FInterpMatrixKernel_IOR : FInterpAbstractMatrixKernel
const unsigned int _i,_j;
FInterpMatrixKernel_IOR(const double = 0.0, const unsigned int d = 0)
: _i(indexTab[d]), _j(indexTab[d+DIM])
: _i(indexTab[d]), _j(indexTab[d+NCMP])
{}
// returns position in reduced storage from position in full 3x3 matrix
......@@ -276,9 +286,9 @@ struct FInterpMatrixKernel_IOR : FInterpAbstractMatrixKernel
const FPoint xy(x-y);
const FReal one_over_r = FReal(1.)/xy.norm();
for(unsigned int d=0;d<DIM;++d){
for(unsigned int d=0;d<NCMP;++d){
// unsigned int i = indexTab[d];
// unsigned int j = indexTab[d+DIM];
// unsigned int j = indexTab[d+NCMP];
// if(i==j)
block[d] = one_over_r;
......@@ -313,15 +323,15 @@ struct FInterpMatrixKernel_R_IJ : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = NON_HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = R_IJ;
static const unsigned int NK = 3*3; // dim of kernel
static const unsigned int DIM = 6; // reduced dim
static const unsigned int NPOT = 3; // dim of potentials
static const unsigned int NPV = 3; // dim of physical values
static const unsigned int NRHS = NPV; // dim of mult exp (here = dim physval)
static const unsigned int NLHS = NPOT*NRHS; // dim of loc exp (here = NPOT*NRHS)
static const unsigned int NK = 3*3; //< total number of components
static const unsigned int NCMP = 6; //< number of components
static const unsigned int NPV = 3; //< dim of physical values
static const unsigned int NPOT = 3; //< dim of potentials
static const unsigned int NRHS = NPV; //< dim of mult exp
static const unsigned int NLHS = NPOT*NRHS; //< dim of loc exp
// store indices (i,j) corresponding to sym idx
static const unsigned int indexTab[/*2*DIM=12*/];
static const unsigned int indexTab[/*2*NCMP=12*/];
// store positions in sym tensor (when looping over NRHSxNLHS)
static const unsigned int applyTab[/*NK=9*/];
......@@ -333,7 +343,7 @@ struct FInterpMatrixKernel_R_IJ : FInterpAbstractMatrixKernel
const double _CoreWidth2; // if >0 then kernel is NON homogeneous
FInterpMatrixKernel_R_IJ(const double CoreWidth = 0.0, const unsigned int d = 0)
: _i(indexTab[d]), _j(indexTab[d+DIM]), _CoreWidth2(CoreWidth*CoreWidth)
: _i(indexTab[d]), _j(indexTab[d+NCMP]), _CoreWidth2(CoreWidth*CoreWidth)
{}
// returns position in reduced storage from position in full 3x3 matrix
......@@ -379,9 +389,9 @@ struct FInterpMatrixKernel_R_IJ : FInterpAbstractMatrixKernel
const FReal one_over_r3 = one_over_r*one_over_r*one_over_r;
const double r[3] = {xy.getX(),xy.getY(),xy.getZ()};
for(unsigned int d=0;d<DIM;++d){
for(unsigned int d=0;d<NCMP;++d){
unsigned int i = indexTab[d];
unsigned int j = indexTab[d+DIM];
unsigned int j = indexTab[d+NCMP];
if(i==j)
block[d] = one_over_r - r[i] * r[i] * one_over_r3;
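
For readability, the diagonal branch shown above corresponds to the usual component form of this kernel; the off-diagonal branch lies outside this excerpt, and the core-width regularization of \f$1/|r|\f$ is likewise omitted here:

\f[ K_{ij}(x,y) \;=\; \frac{\delta_{ij}}{|r|} \;-\; \frac{r_i\,r_j}{|r|^{3}}, \qquad r = x - y . \f]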
......@@ -416,15 +426,15 @@ struct FInterpMatrixKernel_R_IJK : FInterpAbstractMatrixKernel
{
static const KERNEL_FUNCTION_TYPE Type = NON_HOMOGENEOUS;
static const KERNEL_FUNCTION_IDENTIFIER Identifier = R_IJK;
static const unsigned int NK = 3*3*3; // dim of kernel
static const unsigned int DIM = 10; // reduced dim
static const unsigned int NPOT = 3; // dim of potentials
static const unsigned int NPV = 3*3; // dim of physical values
static const unsigned int NRHS = NPV; // dim of mult exp (here = dim physval)
static const unsigned int NLHS = NPOT*NRHS; // dim of loc exp (here = NPOT*NRHS)
static const unsigned int NK = 3*3*3; //< total number of components
static const unsigned int NCMP = 10; //< number of components after symmetry
static const unsigned int NPOT = 3; //< dim of potentials
static const unsigned int NPV = 3*3; //< dim of physical values
static const unsigned int NRHS = NPV; //< dim of mult exp
static const unsigned int NLHS = NPOT*NRHS; //< dim of loc exp
// store indices (i,j,k) corresponding to sym idx
static const unsigned int indexTab[/*3*DIM=30*/];
static const unsigned int indexTab[/*3*NCMP=30*/];
// store positions in sym tensor wr to full
static const unsigned int applyTab[/*NK=27*/];
......@@ -436,7 +446,7 @@ struct FInterpMatrixKernel_R_IJK : FInterpAbstractMatrixKernel
const double _CoreWidth2; // if >0 then kernel is NON homogeneous
FInterpMatrixKernel_R_IJK(const double CoreWidth = 0.0, const unsigned int d = 0)
: _i(indexTab[d]), _j(indexTab[d+DIM]), _k(indexTab[d+2*DIM]), _CoreWidth2(CoreWidth*CoreWidth)
: _i(indexTab[d]), _j(indexTab[d+NCMP]), _k(indexTab[d+2*NCMP]), _CoreWidth2(CoreWidth*CoreWidth)
{}
// returns position in reduced storage from position in full 3x3x3 matrix
......@@ -449,7 +459,8 @@ struct FInterpMatrixKernel_R_IJK : FInterpAbstractMatrixKernel
FReal evaluate(const FPoint& x, const FPoint& y) const
{
// const FPoint xy(x-y);
// Convention for anti-symmetric kernels xy=y-x instead of xy=x-y
// This makes the P2P mutual interactions implementation more explicit
const FPoint xy(y-x);
const FReal one_over_r = FReal(1.)/sqrt(xy.getX()*xy.getX() +
xy.getY()*xy.getY() +
......@@ -494,7 +505,8 @@ struct FInterpMatrixKernel_R_IJK : FInterpAbstractMatrixKernel
void evaluateBlock(const FPoint& x, const FPoint& y, FReal* block) const
{
// const FPoint xy(x-y);
// Convention for anti-symmetric kernels xy=y-x instead of xy=x-y
// This makes the P2P mutual interactions implementation more explicit
const FPoint xy(y-x);
const FReal one_over_r = FReal(1.)/sqrt(xy.getX()*xy.getX() +
xy.getY()*xy.getY() +
......@@ -504,10 +516,10 @@ struct FInterpMatrixKernel_R_IJK : FInterpAbstractMatrixKernel
const double r[3] = {xy.getX(),xy.getY(),xy.getZ()};
for(unsigned int d=0;d<DIM;++d){
for(unsigned int d=0;d<NCMP;++d){
unsigned int i = indexTab[d];
unsigned int j = indexTab[d+DIM];
unsigned int k = indexTab[d+2*DIM];
unsigned int j = indexTab[d+NCMP];
unsigned int k = indexTab[d+2*NCMP];
if(i==j){
if(j==k) //i=j=k
......
......@@ -33,7 +33,7 @@
#include "./FUnifTensor.hpp"
/*! Precomputation of the 316 interactions by evaluation of the matrix kernel on the uniform grid and transformation into Fourier space. These interactions can be tensorial (of size dim) and are computed blockwise
/*! Precomputation of the 316 interactions by evaluation of the matrix kernel on the uniform grid and transformation into Fourier space. These interactions can be tensorial (of size ncmp) and are computed blockwise
PB: Compute() does not belong to the M2LHandler like it does in the Chebyshev kernel. This allows much nicer specialization of the M2LHandler class with respect to the homogeneity of the kernel of interaction like in the ChebyshevSym kernel.*/
template <int ORDER, class MatrixKernelClass>
static void Compute(const MatrixKernelClass *const MatrixKernel,
......@@ -44,12 +44,12 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
const unsigned int order = ORDER;
const unsigned int nnodes = TensorTraits<ORDER>::nnodes;
const unsigned int ninteractions = 316;
const unsigned int dim = MatrixKernelClass::DIM;
const unsigned int ncmp = MatrixKernelClass::NCMP;
typedef FUnifTensor<ORDER> TensorType;
// allocate memory and store compressed M2L operators
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
if (FC[d]) throw std::runtime_error("M2L operators are already set");
// interpolation points of source (Y) and target (X) cell
......@@ -63,11 +63,11 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
// reduce storage from nnodes^2=order^6 to (2order-1)^3
const unsigned int rc = (2*order-1)*(2*order-1)*(2*order-1);
_C = new FReal* [dim];
_FC = new FComplexe* [dim];
for (unsigned int d=0; d<dim; ++d)
_C = new FReal* [ncmp];
_FC = new FComplexe* [ncmp];
for (unsigned int d=0; d<ncmp; ++d)
_C[d] = new FReal[rc];
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
_FC[d] = new FComplexe[rc * ninteractions];
// initialize root node ids pairs
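
As a worked example of the storage reduction mentioned above, taking ORDER = 5 for illustration (opt_rc is the further reduction applied below for real-valued kernels):

\f[ nnodes = 5^3 = 125, \qquad nnodes^2 = 15\,625, \qquad rc = (2\cdot5-1)^3 = 729, \qquad opt\_rc = \lfloor rc/2 \rfloor + 1 = 365 . \f]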
......@@ -100,7 +100,7 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
// Compute current M2L interaction (block matrix)
FReal* block;
block = new FReal[dim];
block = new FReal[ncmp];
MatrixKernel->evaluateBlock(X[node_ids_pairs[ido][0]],
Y[node_ids_pairs[ido][1]],
block);
......@@ -111,14 +111,14 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
// i.e. C[0] C[rc-1] C[rc-2] .. C[1] < WRONG!
// i.e. C[rc-1] C[0] C[1] .. C[rc-2] < RIGHT!
// _C[counter*rc + ido]
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
_C[d][perm[ido]] = block[d];
ido++;
}
// Apply Discrete Fourier Transformation
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
Dft.applyDFT(_C[d],_FC[d]+counter*rc);
// increment interaction counter
......@@ -131,7 +131,7 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
throw std::runtime_error("Number of interactions must correspond to 316");
// Free _C
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)
delete [] _C[d];
// store FC
......@@ -139,15 +139,15 @@ static void Compute(const MatrixKernelClass *const MatrixKernel,
// reduce storage if real valued kernel
const unsigned int opt_rc = rc/2+1;
// allocate M2L
for (unsigned int d=0; d<dim; ++d)
FC[d] = new FComplexe[343 * opt_rc]; //PB: allocation already done wr DIM
for (unsigned int d=0; d<ncmp; ++d)
FC[d] = new FComplexe[343 * opt_rc]; //PB: allocation already done wr NCMP
for (int i=-3; i<=3; ++i)
for (int j=-3; j<=3; ++j)
for (int k=-3; k<=3; ++k) {
const unsigned int idx = (i+3)*7*7 + (j+3)*7 + (k+3);
if (abs(i)>1 || abs(j)>1 || abs(k)>1) {
for (unsigned int d=0; d<dim; ++d)
for (unsigned int d=0; d<ncmp; ++d)