Commit 93deeaed authored by Martin Khannouz and committed by Berenger Bramas

Add almost the same data mapping as in the explicit StarPU MPI version.

parent 76f49d8e
......@@ -350,6 +350,31 @@ public:
unsigned int getDataType(){
return typeData[0]; }
/**
* Fills a particle's position from the current position in the file.
* Any extra per-particle data present in the record is read and discarded.
*
* @param outParticlePositions the position of the particle to fill (FPoint<FReal> class)
*/
void fillParticle(FPoint<FReal>*const outParticlePositions){
if(binaryFile){
file->read((char*)(outParticlePositions), sizeof(FReal)*3);
if(otherDataToRead> 0){
file->read((char*)(this->tmpVal), sizeof(FReal)*otherDataToRead);
}
} else {
FReal x,y,z;
(*this->file) >> x >> y >> z;
outParticlePositions->setPosition(x,y,z);
if(otherDataToRead> 0){
for (FSize i = 0 ; i <otherDataToRead; ++i){
(*this->file) >> x ;
}
}
}
}
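For context, here is a minimal usage sketch of the position-only overload added above, assuming the surrounding class is the FMA loader; the include paths and the input file name "particles.fma" are illustrative, not part of the commit:

// Hypothetical usage sketch: read only the particle positions from an FMA file.
#include "Files/FFmaGenericLoader.hpp"
#include "Utils/FPoint.hpp"

int main(){
    typedef double FReal;
    FFmaGenericLoader<FReal> loader("particles.fma");
    for(FSize idxPart = 0; idxPart < loader.getNumberOfParticles(); ++idxPart){
        FPoint<FReal> position;
        loader.fillParticle(&position);   // the overload added above: extra data is skipped
        // ... insert `position` into an octree, accumulate statistics, etc.
    }
    return 0;
}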
/**
* Fills a particle from the current position in the file.
*
......@@ -494,6 +519,8 @@ private:
this->centerOfBox.setPosition(x,y,z);
this->boxWidth *= 2;
if(typeData[1] < 4)
otherDataToRead = 0;
else
otherDataToRead = typeData[1] - (unsigned int)(4);
};
void readBinaryHeader(){
std::cout << " File open in binary mode "<< std::endl;
......
......@@ -580,7 +580,6 @@ public:
// for(int idx = 0 ; idx < nbParticlesInArray ; ++idx){
// particleSaver->push(sortedParticlesArray[idx].particle);
// }
ParticleClass* particlesArrayInLeafOrder = nullptr;
FSize * leavesOffsetInParticles = nullptr;
FSize nbLeaves = 0;
......
......@@ -106,6 +106,53 @@ public:
(getRandom() * boxWidth) + centerOfBox.getY() - boxWidth/2,
(getRandom() * boxWidth) + centerOfBox.getZ() - boxWidth/2);
}
void fillParticleAtMortonIndex(FPoint<FReal>*const inParticlePositions, MortonIndex idx, unsigned int treeHeight){
MortonIndex mask = 0x1LL;
//Width of a box at the leaf level
FReal leafWidth = boxWidth / FReal(1<<(treeHeight-1));
//Offset from the center: half of the leaf width
FReal currentOffset = leafWidth / 2.0;
//Initialize x, y, z to the center of the global box
FReal x, y, z;
x = centerOfBox.getX();
y = centerOfBox.getY();
z = centerOfBox.getZ();
//Shift the parent's center toward the child's center, once per level
//Since the shifts are successive and independent of each other, it is simpler to start with the shifts at the leaf level
for(unsigned int i = 0; i < treeHeight-1; ++i)
{
bool x_offset, y_offset, z_offset;
//Check the 1st bit, which corresponds to z
z_offset = (idx & mask);
idx >>= 1;
//Check the 2nd bit, which corresponds to y
y_offset = (idx & mask);
idx >>= 1;
//Check the 3rd bit, which corresponds to x
x_offset = (idx & mask);
idx >>= 1;
//Shift along x
if(x_offset)
x += currentOffset;
else
x -= currentOffset;
//Shift along y
if(y_offset)
y += currentOffset;
else
y -= currentOffset;
//Shift along z
if(z_offset)
z += currentOffset;
else
z -= currentOffset;
//Double the offset as we move up through the levels
currentOffset *= 2;
}
inParticlePositions->setPosition( x, y, z);
}
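The loop above reads the Morton index three bits at a time (z, then y, then x) and walks from the leaf level up toward the root, doubling the offset at each step, so the final point is the center of the leaf cell carrying that Morton index. Below is a small self-contained sketch of the same decoding, with a hypothetical helper name and made-up box parameters, useful to sanity-check the result:

#include <iostream>

typedef double FReal;
typedef long long MortonIndex;

// Standalone version of the decoding done in fillParticleAtMortonIndex:
// computes the center of the leaf cell identified by a Morton index.
void leafCenterFromMorton(MortonIndex idx, unsigned treeHeight,
                          FReal boxWidth, FReal cx, FReal cy, FReal cz,
                          FReal& x, FReal& y, FReal& z){
    const FReal leafWidth = boxWidth / FReal(1 << (treeHeight-1));
    FReal offset = leafWidth / 2.0;
    x = cx; y = cy; z = cz;
    for(unsigned i = 0; i < treeHeight-1; ++i){
        // The lowest three bits of the index give the z, y, x child flags at this level.
        const bool zBit = idx & 1; idx >>= 1;
        const bool yBit = idx & 1; idx >>= 1;
        const bool xBit = idx & 1; idx >>= 1;
        x += xBit ? offset : -offset;
        y += yBit ? offset : -offset;
        z += zBit ? offset : -offset;
        offset *= 2;  // cells are twice as wide one level up
    }
}

int main(){
    FReal x, y, z;
    // Unit box centered at the origin, tree of height 3 (leaf cells of width 0.25).
    leafCenterFromMorton(0, 3, 1.0, 0.0, 0.0, 0.0, x, y, z);
    std::cout << x << " " << y << " " << z << "\n";   // expected: -0.375 -0.375 -0.375
    return 0;
}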
/** Get a random number between 0 & 1 */
FReal getRandom() const{
......
......@@ -44,6 +44,7 @@
#include "Containers/FBoolArray.hpp"
#include <iostream>
#include <vector>
using namespace std;
template <class OctreeClass, class CellContainerClass, class KernelClass, class ParticleGroupClass, class StarPUCpuWrapperClass
......@@ -147,9 +148,10 @@ protected:
typedef FStarPUFmmPrioritiesV2 PrioClass;// FStarPUFmmPriorities
#endif
int mpi_rank, nproc;
std::vector<std::vector<std::vector<MortonIndex>>> nodeRepartition;
public:
FGroupTaskStarPUImplicitAlgorithm(OctreeClass*const inTree, KernelClass* inKernels)
FGroupTaskStarPUImplicitAlgorithm(OctreeClass*const inTree, KernelClass* inKernels, std::vector<MortonIndex>& distributedMortonIndex)
: tree(inTree), originalCpuKernel(inKernels),
cellHandles(nullptr),
noCommuteAtLastLevel(FEnv::GetBool("SCALFMM_NO_COMMUTE_LAST_L2L", true)),
......@@ -218,6 +220,7 @@ public:
#endif
initCodelet();
createMachinChose(distributedMortonIndex);
rebuildInteractions();
FLOG(FLog::Controller << "FGroupTaskStarPUAlgorithm (Max Worker " << starpu_worker_get_count() << ")\n");
......@@ -268,7 +271,8 @@ public:
void syncData(){
for(int idxLevel = 0 ; idxLevel < tree->getHeight() ; ++idxLevel){
for(int idxHandle = 0 ; idxHandle < int(cellHandles[idxLevel].size()) ; ++idxHandle){
if(isDataOwned(idxHandle, int(cellHandles[idxLevel].size()))) {
//if(isDataOwned(idxHandle, int(cellHandles[idxLevel].size()))) {
if(isDataOwnedBerenger(tree->getCellGroup(idxLevel, idxHandle)->getStartingIndex()+1, idxLevel)) {//Sync only our data handles
starpu_data_acquire(cellHandles[idxLevel][idxHandle].symb, STARPU_R);
starpu_data_release(cellHandles[idxLevel][idxHandle].symb);
starpu_data_acquire(cellHandles[idxLevel][idxHandle].up, STARPU_R);
......@@ -280,7 +284,8 @@ public:
}
{
for(int idxHandle = 0 ; idxHandle < int(particleHandles.size()) ; ++idxHandle){
if(isDataOwned(idxHandle, int(particleHandles.size()))) {
//if(isDataOwned(idxHandle, int(particleHandles.size()))) {
if(isDataOwnedBerenger(tree->getCellGroup(tree->getHeight()-1, idxHandle)->getStartingIndex()+1, tree->getHeight()-1)) {//Sync only our data handles
starpu_data_acquire(particleHandles[idxHandle].symb, STARPU_R);
starpu_data_release(particleHandles[idxHandle].symb);
starpu_data_acquire(particleHandles[idxHandle].down, STARPU_R);
......@@ -393,8 +398,30 @@ public:
int getRank(void) const {
return mpi_rank;
}
bool isDataOwned(int idxGroup, int nbGroup) const {
return dataMapping(idxGroup, nbGroup) == mpi_rank;
int getNProc(void) const {
return nproc;
}
//bool isDataOwned(int idxGroup, int nbGroup) const {
//return dataMapping(idxGroup, nbGroup) == mpi_rank;
//}
bool isDataOwnedBerenger(MortonIndex const idx, int const idxLevel) const {
return dataMappingBerenger(idx, idxLevel) == mpi_rank;
}
void createMachinChose(std::vector<MortonIndex> distributedMortonIndex)
{
nodeRepartition.resize(tree->getHeight(), std::vector<std::vector<MortonIndex>>(nproc, std::vector<MortonIndex>(2)));
for(int node_id = 0; node_id < nproc; ++node_id){
nodeRepartition[tree->getHeight()-1][node_id][0] = distributedMortonIndex[node_id*2];
nodeRepartition[tree->getHeight()-1][node_id][1] = distributedMortonIndex[node_id*2+1];
}
for(int idxLevel = tree->getHeight() - 2; idxLevel >= 0 ; --idxLevel){
nodeRepartition[idxLevel][0][0] = nodeRepartition[idxLevel+1][0][0] >> 3;
nodeRepartition[idxLevel][0][1] = nodeRepartition[idxLevel+1][0][1] >> 3;
for(int node_id = 1; node_id < nproc; ++node_id){
nodeRepartition[idxLevel][node_id][0] = FMath::Max(nodeRepartition[idxLevel+1][node_id][0] >> 3, nodeRepartition[idxLevel+1][node_id-1][0]+1); //Berenger phd :)
nodeRepartition[idxLevel][node_id][1] = nodeRepartition[idxLevel+1][node_id][1] >> 3;
}
}
}
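For reference, here is a minimal standalone sketch of the interval propagation performed by createMachinChose above, run on made-up leaf-level bounds (2 ranks, tree height 4); the array layout mimics nodeRepartition[level][rank][lower/upper] and all numbers are purely illustrative:

#include <algorithm>
#include <iostream>

typedef long long MortonIndex;

int main(){
    const int treeHeight = 4, nproc = 2;
    // Hypothetical leaf-level Morton bounds per rank, same layout as nodeRepartition[level][rank][0..1].
    MortonIndex rep[4][2][2];
    rep[treeHeight-1][0][0] = 0;   rep[treeHeight-1][0][1] = 255;
    rep[treeHeight-1][1][0] = 256; rep[treeHeight-1][1][1] = 511;
    // Same propagation as in createMachinChose: a parent Morton index is the child index
    // shifted right by 3 bits, and each rank's lower bound is clamped with max() so it
    // stays above the previous rank's bound.
    for(int level = treeHeight-2; level >= 0; --level){
        rep[level][0][0] = rep[level+1][0][0] >> 3;
        rep[level][0][1] = rep[level+1][0][1] >> 3;
        for(int r = 1; r < nproc; ++r){
            rep[level][r][0] = std::max(rep[level+1][r][0] >> 3, rep[level+1][r-1][0] + 1);
            rep[level][r][1] = rep[level+1][r][1] >> 3;
        }
    }
    // E.g. at level 2 this yields [0,31] for rank 0 and [32,63] for rank 1.
    for(int level = 0; level < treeHeight; ++level)
        for(int r = 0; r < nproc; ++r)
            std::cout << "level " << level << " rank " << r << ": ["
                      << rep[level][r][0] << ", " << rep[level][r][1] << "]\n";
    return 0;
}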
protected:
/**
......@@ -744,7 +771,8 @@ protected:
void cleanHandle(){
for(int idxLevel = 0 ; idxLevel < tree->getHeight() ; ++idxLevel){
for(int idxHandle = 0 ; idxHandle < int(cellHandles[idxLevel].size()) ; ++idxHandle){
if(isDataOwned(idxHandle, int(cellHandles[idxLevel].size())))//Clean only our data handle
//if(isDataOwned(idxHandle, int(cellHandles[idxLevel].size())))//Clean only our data handle
if(isDataOwnedBerenger(tree->getCellGroup(idxLevel, idxHandle)->getStartingIndex()+1, idxLevel))//Clean only our data handle
{
starpu_data_unregister(cellHandles[idxLevel][idxHandle].symb);
starpu_data_unregister(cellHandles[idxLevel][idxHandle].up);
......@@ -755,7 +783,8 @@ protected:
}
{
for(int idxHandle = 0 ; idxHandle < int(particleHandles.size()) ; ++idxHandle){
if(isDataOwned(idxHandle, int(particleHandles.size())))
//if(isDataOwned(idxHandle, int(particleHandles.size())))
if(isDataOwnedBerenger(tree->getCellGroup(tree->getHeight()-1, idxHandle)->getStartingIndex()+1, tree->getHeight()-1))//Clean only our data handle
{
starpu_data_unregister(particleHandles[idxHandle].symb);
starpu_data_unregister(particleHandles[idxHandle].down);
......@@ -774,9 +803,11 @@ protected:
for(int idxLevel = 2 ; idxLevel < tree->getHeight() ; ++idxLevel){
cellHandles[idxLevel].resize(tree->getNbCellGroupAtLevel(idxLevel));
for(int idxGroup = 0 ; idxGroup < tree->getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
int registeringNode = dataMapping(idxGroup, tree->getNbCellGroupAtLevel(idxLevel));
where = (registeringNode == mpi_rank) ? STARPU_MAIN_RAM : -1;
const CellContainerClass* currentCells = tree->getCellGroup(idxLevel, idxGroup);
int registeringNode = dataMappingBerenger(currentCells->getStartingIndex()+1, idxLevel);
//int registeringNode = dataMapping(idxGroup, tree->getNbCellGroupAtLevel(idxLevel));
where = (registeringNode == mpi_rank) ? STARPU_MAIN_RAM : -1;
starpu_variable_data_register(&cellHandles[idxLevel][idxGroup].symb, where,
(uintptr_t)currentCells->getRawBuffer(), currentCells->getBufferSizeInByte());
starpu_variable_data_register(&cellHandles[idxLevel][idxGroup].up, where,
......@@ -797,7 +828,8 @@ protected:
{
particleHandles.resize(tree->getNbParticleGroup());
for(int idxGroup = 0 ; idxGroup < tree->getNbParticleGroup() ; ++idxGroup){
int registeringNode = dataMapping(idxGroup, tree->getNbParticleGroup());
//int registeringNode = dataMapping(idxGroup, tree->getNbParticleGroup());
int registeringNode = dataMappingBerenger(tree->getCellGroup(tree->getHeight()-1, idxGroup)->getStartingIndex()+1, tree->getHeight()-1);
where = (registeringNode == mpi_rank) ? STARPU_MAIN_RAM : -1;
ParticleGroupClass* containers = tree->getParticleGroup(idxGroup);
starpu_variable_data_register(&particleHandles[idxGroup].symb, where,
......@@ -820,31 +852,39 @@ protected:
}
}
int dataMapping(int idxGroup, int const nbGroup) const
int dataMappingBerenger(MortonIndex const idx, int const idxLevel) const
{
int bonusGroup = nbGroup%nproc; //Number of node in the bonus group
int groupPerNode = (int)floor((double)nbGroup / (double)nproc);
int mpi_node;
if(idxGroup < bonusGroup*(groupPerNode+1)) { //GroupCell in the bonus group
mpi_node = (idxGroup - (idxGroup%(groupPerNode+1)))/(groupPerNode+1);
}
else { //GroupCell outside the bonus group
idxGroup -= bonusGroup*(groupPerNode+1); //Remove the bonus group to compute easily
mpi_node = (idxGroup - (idxGroup%groupPerNode))/groupPerNode; //Find the corresponding node as if their was node in the bonus group
mpi_node += bonusGroup; //Shift to find the real node
}
if(mpi_node >= nproc)
{
cout << "Chenille (0)[" << mpi_rank << "] -> idxGroup:" << idxGroup << " nbGroup:" << nbGroup << endl;
cout << "Chenille (1)[" << mpi_rank << "] -> bonusGroup:" << bonusGroup << " groupPerNode:" << groupPerNode << endl;
if(idxGroup < bonusGroup*(groupPerNode+1))
cout << "Chenille (2)[" << mpi_rank << "] -> mid:" << (idxGroup%(groupPerNode+1)) << endl;
else
cout << "Chenille (2)[" << mpi_rank << "] -> mid:" << (idxGroup%groupPerNode) << endl;
cout << "Chenille (3)[" << mpi_rank << "] -> mpi_node:" << mpi_node << " isBonusGroup:" << (idxGroup < bonusGroup*(groupPerNode+1)) <<endl;
}
return mpi_node;
return 0; //Early return: for now every group is mapped to rank 0, so the interval lookup below is never reached
for(int i = 0; i < nproc; ++i)
if(nodeRepartition[idxLevel][i][0] <= nodeRepartition[idxLevel][i][1] && idx > nodeRepartition[idxLevel][i][0] && idx <= nodeRepartition[idxLevel][i][1])
return i;
return -1;
}
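Once the early return above is removed, ownership is decided by scanning the per-level intervals; note that the callers pass getStartingIndex()+1 and the test uses idx > lower && idx <= upper, so the intervals behave as half-open (lower, upper]. A standalone sketch of that lookup, with illustrative names and values:

#include <iostream>
#include <vector>

typedef long long MortonIndex;

// Same lookup as dataMappingBerenger: intervals behave as (lower, upper],
// which is why the callers pass getStartingIndex()+1 rather than the raw index.
int ownerOf(const std::vector<std::vector<std::vector<MortonIndex>>>& repartition,
            MortonIndex idx, int level, int nproc){
    for(int rank = 0; rank < nproc; ++rank){
        const MortonIndex lower = repartition[level][rank][0];
        const MortonIndex upper = repartition[level][rank][1];
        if(lower <= upper && idx > lower && idx <= upper)
            return rank;
    }
    return -1; // no rank owns this index at this level
}

int main(){
    // One level, two ranks: rank 0 owns (0, 255], rank 1 owns (255, 511].
    std::vector<std::vector<std::vector<MortonIndex>>> repartition = {{{0, 255}, {255, 511}}};
    std::cout << ownerOf(repartition, 300, 0, 2) << "\n"; // prints 1
    std::cout << ownerOf(repartition, 0,   0, 2) << "\n"; // prints -1 (the lower bound is exclusive)
    return 0;
}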
//int dataMapping(int idxGroup, int const nbGroup) const
//{
//int bonusGroup = nbGroup%nproc; //Number of node in the bonus group
//int groupPerNode = (int)floor((double)nbGroup / (double)nproc);
//int mpi_node;
//if(idxGroup < bonusGroup*(groupPerNode+1)) { //GroupCell in the bonus group
//mpi_node = (idxGroup - (idxGroup%(groupPerNode+1)))/(groupPerNode+1);
//}
//else { //GroupCell outside the bonus group
//idxGroup -= bonusGroup*(groupPerNode+1); //Remove the bonus group to compute easily
//mpi_node = (idxGroup - (idxGroup%groupPerNode))/groupPerNode; //Find the corresponding node as if their was node in the bonus group
//mpi_node += bonusGroup; //Shift to find the real node
//}
//if(mpi_node >= nproc)
//{
//cout << "Chenille (0)[" << mpi_rank << "] -> idxGroup:" << idxGroup << " nbGroup:" << nbGroup << endl;
//cout << "Chenille (1)[" << mpi_rank << "] -> bonusGroup:" << bonusGroup << " groupPerNode:" << groupPerNode << endl;
//if(idxGroup < bonusGroup*(groupPerNode+1))
//cout << "Chenille (2)[" << mpi_rank << "] -> mid:" << (idxGroup%(groupPerNode+1)) << endl;
//else
//cout << "Chenille (2)[" << mpi_rank << "] -> mid:" << (idxGroup%groupPerNode) << endl;
//cout << "Chenille (3)[" << mpi_rank << "] -> mpi_node:" << mpi_node << " isBonusGroup:" << (idxGroup < bonusGroup*(groupPerNode+1)) <<endl;
//}
//return mpi_node;
//}
/**
* This function is creating the interactions vector between blocks.
* It fills externalInteractionsAllLevel and externalInteractionsLeafLevel.
......
// Keep in private GIT
#ifndef FGROUPTREE_HPP
#define FGROUPTREE_HPP
#include <vector>
#include <functional>
#include "../../Utils/FAssert.hpp"
#include "../../Utils/FPoint.hpp"
#include "../../Utils/FQuickSort.hpp"
#include "../../Containers/FTreeCoordinate.hpp"
#include "../../Containers/FCoordinateComputer.hpp"
#include "FGroupOfCells.hpp"
#include "FGroupOfParticles.hpp"
#include "FGroupAttachedLeaf.hpp"
template <class FReal, class CompositeCellClass, class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class GroupAttachedLeafClass, unsigned NbSymbAttributes, unsigned NbAttributesPerParticle, class AttributeClass = FReal>
class FGroupTreeBerenger {
public:
typedef GroupAttachedLeafClass BasicAttachedClass;
typedef FGroupOfParticles<FReal, NbSymbAttributes, NbAttributesPerParticle,AttributeClass> ParticleGroupClass;
typedef FGroupOfCells<CompositeCellClass, SymboleCellClass, PoleCellClass, LocalCellClass> CellGroupClass;
protected:
//< height of the tree (1 => only the root)
const int treeHeight;
//< max number of cells in a block
const int nbElementsPerBlock;
//< all the blocks of the tree
std::vector<CellGroupClass*>* cellBlocksPerLevel;
//< all the blocks of leaves
std::vector<ParticleGroupClass*> particleBlocks;
//< the space system center
const FPoint<FReal> boxCenter;
//< the space system corner (used to compute morton index)
const FPoint<FReal> boxCorner;
//< the space system width
const FReal boxWidth;
//< the width of a box at the leaf level
const FReal boxWidthAtLeafLevel;
public:
typedef typename std::vector<CellGroupClass*>::iterator CellGroupIterator;
typedef typename std::vector<CellGroupClass*>::const_iterator CellGroupConstIterator;
typedef typename std::vector<ParticleGroupClass*>::iterator ParticleGroupIterator;
typedef typename std::vector<ParticleGroupClass*>::const_iterator ParticleGroupConstIterator;
/**
* This constructor creates a group tree from a particle container index.
* The Morton indices are computed and the particles are sorted in a first stage.
* Then the leaf level is built.
* Finally the other levels are processed one after the other.
* It should be easy to make it parallel using for loops and tasks.
* If there is no limit, give inLeftLimite = -1.
*/
template<class ParticleContainer>
FGroupTreeBerenger(const int inTreeHeight, const FReal inBoxWidth, const FPoint<FReal>& inBoxCenter,
const int inNbElementsPerBlock, ParticleContainer* inParticlesContainer,
const bool particlesAreSorted, std::vector<MortonIndex> const& distributedMortonIndex, MortonIndex inLeftLimite = -1):
treeHeight(inTreeHeight),nbElementsPerBlock(inNbElementsPerBlock),cellBlocksPerLevel(nullptr),
boxCenter(inBoxCenter), boxCorner(inBoxCenter,-(inBoxWidth/2)), boxWidth(inBoxWidth),
boxWidthAtLeafLevel(inBoxWidth/FReal(1<<(inTreeHeight-1))){
cellBlocksPerLevel = new std::vector<CellGroupClass*>[treeHeight];
MortonIndex* currentBlockIndexes = new MortonIndex[nbElementsPerBlock];
// First we work at leaf level
{
// Build morton index for particles
struct ParticleSortingStruct{
FSize originalIndex;
MortonIndex mindex;
};
// Convert position to morton index
const FSize nbParticles = inParticlesContainer->getNbParticles();
ParticleSortingStruct* particlesToSort = new ParticleSortingStruct[nbParticles];
{
const FReal* xpos = inParticlesContainer->getPositions()[0];
const FReal* ypos = inParticlesContainer->getPositions()[1];
const FReal* zpos = inParticlesContainer->getPositions()[2];
for(FSize idxPart = 0 ; idxPart < nbParticles ; ++idxPart){
const FTreeCoordinate host = FCoordinateComputer::GetCoordinateFromPositionAndCorner<FReal>(this->boxCorner, this->boxWidth,
treeHeight,
FPoint<FReal>(xpos[idxPart], ypos[idxPart], zpos[idxPart]) );
const MortonIndex particleIndex = host.getMortonIndex(treeHeight-1);
particlesToSort[idxPart].mindex = particleIndex;
particlesToSort[idxPart].originalIndex = idxPart;
}
}
// Sort if needed
if(particlesAreSorted == false){
FQuickSort<ParticleSortingStruct, FSize>::QsOmp(particlesToSort, nbParticles, [](const ParticleSortingStruct& v1, const ParticleSortingStruct& v2){
return v1.mindex <= v2.mindex;
});
}
FAssertLF(nbParticles == 0 || inLeftLimite < particlesToSort[0].mindex);
// Convert to block
const int idxLevel = (treeHeight - 1);
FSize* nbParticlesPerLeaf = new FSize[nbElementsPerBlock];
FSize firstParticle = 0;
// We need to proceed each group in sub level
while(firstParticle != nbParticles){
int sizeOfBlock = 0;
FSize lastParticle = firstParticle;
// Count until the end of the sub group is reached or we have enough cells (or until it reaches the next mortonIndex boundary) TODO
while(sizeOfBlock < nbElementsPerBlock && lastParticle < nbParticles){
if(sizeOfBlock == 0 || currentBlockIndexes[sizeOfBlock-1] != particlesToSort[lastParticle].mindex){
currentBlockIndexes[sizeOfBlock] = particlesToSort[lastParticle].mindex;
nbParticlesPerLeaf[sizeOfBlock] = 1;
sizeOfBlock += 1;
}
else{
nbParticlesPerLeaf[sizeOfBlock-1] += 1;
}
lastParticle += 1;
}
while(lastParticle < nbParticles && currentBlockIndexes[sizeOfBlock-1] == particlesToSort[lastParticle].mindex){
nbParticlesPerLeaf[sizeOfBlock-1] += 1;
lastParticle += 1;
}
// Create a group
CellGroupClass*const newBlock = new CellGroupClass(currentBlockIndexes[0],
currentBlockIndexes[sizeOfBlock-1]+1,
sizeOfBlock);
ParticleGroupClass*const newParticleBlock = new ParticleGroupClass(currentBlockIndexes[0],
currentBlockIndexes[sizeOfBlock-1]+1,
sizeOfBlock, lastParticle-firstParticle);
// Init cells
size_t nbParticlesOffsetBeforeLeaf = 0;
FSize offsetParticles = firstParticle;
for(int cellIdInBlock = 0; cellIdInBlock != sizeOfBlock ; ++cellIdInBlock){
newBlock->newCell(currentBlockIndexes[cellIdInBlock], cellIdInBlock);
CompositeCellClass newNode = newBlock->getCompleteCell(cellIdInBlock);
newNode.setMortonIndex(currentBlockIndexes[cellIdInBlock]);
FTreeCoordinate coord;
coord.setPositionFromMorton(currentBlockIndexes[cellIdInBlock], idxLevel);
newNode.setCoordinate(coord);
// Add leaf
nbParticlesOffsetBeforeLeaf = newParticleBlock->newLeaf(currentBlockIndexes[cellIdInBlock], cellIdInBlock,
nbParticlesPerLeaf[cellIdInBlock], nbParticlesOffsetBeforeLeaf);
BasicAttachedClass attachedLeaf = newParticleBlock->template getLeaf<BasicAttachedClass>(cellIdInBlock);
// Copy each particle from the original position
for(FSize idxPart = 0 ; idxPart < nbParticlesPerLeaf[cellIdInBlock] ; ++idxPart){
attachedLeaf.setParticle(idxPart, particlesToSort[idxPart + offsetParticles].originalIndex, inParticlesContainer);
}
offsetParticles += nbParticlesPerLeaf[cellIdInBlock];
}
// Keep the block
cellBlocksPerLevel[idxLevel].push_back(newBlock);
particleBlocks.push_back(newParticleBlock);
sizeOfBlock = 0;
firstParticle = lastParticle;
}
delete[] nbParticlesPerLeaf;
delete[] particlesToSort;
}
// For each level from height - 2 to 1
for(int idxLevel = treeHeight-2; idxLevel > 0 ; --idxLevel){
inLeftLimite = (inLeftLimite == -1 ? inLeftLimite : (inLeftLimite>>3));
CellGroupConstIterator iterChildCells = cellBlocksPerLevel[idxLevel+1].begin();
const CellGroupConstIterator iterChildEndCells = cellBlocksPerLevel[idxLevel+1].end();
// Skip blocks that do not respect limit
while(iterChildCells != iterChildEndCells
&& ((*iterChildCells)->getEndingIndex()>>3) <= inLeftLimite){
++iterChildCells;
}
// If the lower level is empty or all blocks were skipped, stop here
if(iterChildCells == iterChildEndCells){
break;
}
MortonIndex currentCellIndex = (*iterChildCells)->getStartingIndex();
if((currentCellIndex>>3) <= inLeftLimite) currentCellIndex = ((inLeftLimite+1)<<3);
int sizeOfBlock = 0;
// We need to proceed each group in sub level
while(iterChildCells != iterChildEndCells){
// Count until end of sub group is reached or we have enough cells
while(sizeOfBlock < nbElementsPerBlock && iterChildCells != iterChildEndCells ){
if((sizeOfBlock == 0 || currentBlockIndexes[sizeOfBlock-1] != (currentCellIndex>>3))
&& (*iterChildCells)->exists(currentCellIndex)){
currentBlockIndexes[sizeOfBlock] = (currentCellIndex>>3);
sizeOfBlock += 1;
currentCellIndex = (((currentCellIndex>>3)+1)<<3);
}
else{
currentCellIndex += 1;
}
// If we are at the end of the sub group, move to next
while(iterChildCells != iterChildEndCells && (*iterChildCells)->getEndingIndex() <= currentCellIndex){
++iterChildCells;
// Update morton index
if(iterChildCells != iterChildEndCells && currentCellIndex < (*iterChildCells)->getStartingIndex()){
currentCellIndex = (*iterChildCells)->getStartingIndex();
}
}
}
// If group is full
if(sizeOfBlock == nbElementsPerBlock || (sizeOfBlock && iterChildCells == iterChildEndCells)){
// Create a group
CellGroupClass*const newBlock = new CellGroupClass(currentBlockIndexes[0],
currentBlockIndexes[sizeOfBlock-1]+1,
sizeOfBlock);
// Init cells
for(int cellIdInBlock = 0; cellIdInBlock != sizeOfBlock ; ++cellIdInBlock){
newBlock->newCell(currentBlockIndexes[cellIdInBlock], cellIdInBlock);
CompositeCellClass newNode = newBlock->getCompleteCell(cellIdInBlock);
newNode.setMortonIndex(currentBlockIndexes[cellIdInBlock]);
FTreeCoordinate coord;
coord.setPositionFromMorton(currentBlockIndexes[cellIdInBlock], idxLevel);
newNode.setCoordinate(coord);
}
// Keep the block
cellBlocksPerLevel[idxLevel].push_back(newBlock);
sizeOfBlock = 0;
}
}
}
delete[] currentBlockIndexes;
}
/** This function deallocates the tree by deleting each block */
~FGroupTreeBerenger(){
for(int idxLevel = 0 ; idxLevel < treeHeight ; ++idxLevel){
std::vector<CellGroupClass*>& levelBlocks = cellBlocksPerLevel[idxLevel];
for (CellGroupClass* block: levelBlocks){
delete block;
}
}
delete[] cellBlocksPerLevel;
for (ParticleGroupClass* block: particleBlocks){
delete block;
}
}
/////////////////////////////////////////////////////////
// Lambda function to apply to all member
/////////////////////////////////////////////////////////
/**
* @brief forEachLeaf iterates on the leaves and applies the function
* @param function
*/
template<class ParticlesAttachedClass>
void forEachLeaf(std::function<void(ParticlesAttachedClass*)> function){
for (ParticleGroupClass* block: particleBlocks){
block->forEachLeaf(function);
}
}
/**
* @brief forEachCell iterates on the cells and applies the function
* @param function
*/
void forEachCell(std::function<void(CompositeCellClass)> function){
for(int idxLevel = 0 ; idxLevel < treeHeight ; ++idxLevel){
std::vector<CellGroupClass*>& levelBlocks = cellBlocksPerLevel[idxLevel];
for (CellGroupClass* block: levelBlocks){
block->forEachCell(function);
}
}
}
/**
* @brief forEachCellWithLevel iterates on the cells and applies the function
* @param function
*/
void forEachCellWithLevel(std::function<void(CompositeCellClass,const int)> function){
for(int idxLevel = 0 ; idxLevel < treeHeight ; ++idxLevel){
std::vector<CellGroupClass*>& levelBlocks = cellBlocksPerLevel[idxLevel];
for (CellGroupClass* block: levelBlocks){