Commit 76f49d8e authored by Martin Khannouz, committed by Berenger Bramas

Add first implicit algorithm and the corresponding tests.

parent 1310ef8d
// Keep in private GIT
#include <iostream>
using namespace std;
#include "../../Src/Utils/FGlobal.hpp"
#include "../../Src/GroupTree/Core/FGroupTree.hpp"
#include "../../Src/Components/FSimpleLeaf.hpp"
#include "../../Src/Containers/FVector.hpp"
#include "../../Src/Utils/FMath.hpp"
#include "../../Src/Utils/FMemUtils.hpp"
#include "../../Src/Utils/FParameters.hpp"
#include "../../Src/Files/FRandomLoader.hpp"
#include "../../Src/GroupTree/Core/FGroupSeqAlgorithm.hpp"
#include "../../Src/GroupTree/Core/FGroupTaskStarpuImplicitAlgorithm.hpp"
#include "../../Src/GroupTree/StarPUUtils/FStarPUKernelCapacities.hpp"
#include "../../Src/GroupTree/StarPUUtils/FStarPUCpuWrapper.hpp"
#include "../../Src/GroupTree/Core/FP2PGroupParticleContainer.hpp"
#include "../../Src/GroupTree/Core/FGroupTaskAlgorithm.hpp"
#include "../../Src/Utils/FParameterNames.hpp"
#include "../../Src/Components/FTestParticleContainer.hpp"
#include "../../Src/Components/FTestCell.hpp"
#include "../../Src/Components/FTestKernels.hpp"
#include "../../Src/GroupTree/TestKernel/FGroupTestParticleContainer.hpp"
#include "../../Src/GroupTree/TestKernel/FTestCellPOD.hpp"
#include "../../Src/Files/FFmaGenericLoader.hpp"
#include "../../Src/Core/FFmmAlgorithm.hpp"
int main(int argc, char* argv[]){
    setenv("STARPU_NCPU","1",1);
    const FParameterNames LocalOptionBlocSize {
        {"-bs"},
        "The size of the block of the blocked tree"
    };
    FHelpDescribeAndExit(argc, argv, "Test the blocked tree by counting the particles.",
                         FParameterDefinitions::OctreeHeight, FParameterDefinitions::NbParticles,
                         FParameterDefinitions::OctreeSubHeight, LocalOptionBlocSize);
    int provided = 0;
    //MPI_Init_thread(&argc,&argv, MPI_THREAD_SERIALIZED, &provided);
    typedef double FReal;
    // Initialize the types
    typedef FTestCellPODCore GroupCellSymbClass;
    typedef FTestCellPODData GroupCellUpClass;
    typedef FTestCellPODData GroupCellDownClass;
    typedef FTestCellPOD GroupCellClass;
    typedef FGroupTestParticleContainer<FReal> GroupContainerClass;
    typedef FGroupTree< FReal, GroupCellClass, GroupCellSymbClass, GroupCellUpClass, GroupCellDownClass,
            GroupContainerClass, 0, 1, long long int> GroupOctreeClass;
    typedef FStarPUAllCpuCapacities<FTestKernels< GroupCellClass, GroupContainerClass >> GroupKernelClass;
    typedef FStarPUCpuWrapper<typename GroupOctreeClass::CellGroupClass, GroupCellClass, GroupKernelClass, typename GroupOctreeClass::ParticleGroupClass, GroupContainerClass> GroupCpuWrapper;
    typedef FGroupTaskStarPUImplicitAlgorithm<GroupOctreeClass, typename GroupOctreeClass::CellGroupClass, GroupKernelClass, typename GroupOctreeClass::ParticleGroupClass, GroupCpuWrapper > GroupAlgorithm;
    typedef FTestCell CellClass;
    typedef FTestParticleContainer<FReal> ContainerClass;
    typedef FSimpleLeaf<FReal, ContainerClass > LeafClass;
    typedef FOctree<FReal, CellClass, ContainerClass , LeafClass > OctreeClass;
    typedef FTestKernels< CellClass, ContainerClass > KernelClass;
    // FFmmAlgorithmTask FFmmAlgorithmThread
    typedef FFmmAlgorithm<OctreeClass, CellClass, ContainerClass, KernelClass, LeafClass > FmmClass;
    // Get params
    const int NbLevels = FParameters::getValue(argc,argv,FParameterDefinitions::OctreeHeight.options, 5);
    const int groupSize = FParameters::getValue(argc,argv,LocalOptionBlocSize.options, 250);
#ifdef STARPU_USE_MPI
    cout << "MPI \\o/" << endl;
#else
cout << "Pas de mpi -_-\" " << endl;
#endif
    //#define LOAD_FILE
#ifndef LOAD_FILE
    const FSize NbParticles = FParameters::getValue(argc,argv,FParameterDefinitions::NbParticles.options, FSize(20));
    FRandomLoader<FReal> loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), 0);
#else
    // Load the particles
    const char* const filename = FParameters::getStr(argc,argv,FParameterDefinitions::InputFile.options, "../Data/test20k.fma");
    FFmaGenericLoader<FReal> loader(filename);
#endif
    FAssertLF(loader.isOpen());
    // Usual octree
    OctreeClass tree(NbLevels, FParameters::getValue(argc,argv,FParameterDefinitions::OctreeSubHeight.options, 2),
                     loader.getBoxWidth(), loader.getCenterOfBox());
    FTestParticleContainer<FReal> allParticles;
    for(FSize idxPart = 0 ; idxPart < loader.getNumberOfParticles() ; ++idxPart){
        FPoint<FReal> particlePosition;
#ifndef LOAD_FILE
        loader.fillParticle(&particlePosition);
#else
        FReal ph;
        loader.fillParticle(&particlePosition, &ph);
#endif
        allParticles.push(particlePosition);
        tree.insert(particlePosition);
    }
    // Put the data into the tree
    //GroupOctreeClass groupedTree(NbLevels, groupSize, &tree);
    //GroupOctreeClass groupedTree(NbLevels, loader.getBoxWidth(), loader.getCenterOfBox(), groupSize, &allParticles);
    //GroupOctreeClass groupedTree(NbLevels, loader.getBoxWidth(), loader.getCenterOfBox(), groupSize, &allParticles, false, true);
    GroupOctreeClass groupedTree(NbLevels, loader.getBoxWidth(), loader.getCenterOfBox(), groupSize, &allParticles, false, true, 0.2);
    //groupedTree.printInfoBlocks();
    // Check tree structure at leaf level
    groupedTree.forEachCellLeaf<GroupContainerClass>([&](GroupCellClass gcell, GroupContainerClass* gleaf){
        const ContainerClass* src = tree.getLeafSrc(gcell.getMortonIndex());
        if(src == nullptr){
            std::cout << "[PartEmpty] Error cell should not exist " << gcell.getMortonIndex() << "\n";
        }
        else {
            if(src->getNbParticles() != gleaf->getNbParticles()){
                std::cout << "[Part] Nb particles is different at index " << gcell.getMortonIndex() << " is " << gleaf->getNbParticles() << " should be " << src->getNbParticles() << "\n";
            }
        }
    });
    // Run the algorithm
    GroupKernelClass groupkernel;
    GroupAlgorithm groupalgo(&groupedTree,&groupkernel);
    groupalgo.execute();
    // Usual algorithm
    KernelClass kernels; // FTestKernels FBasicKernels
    FmmClass algo(&tree,&kernels); //FFmmAlgorithm FFmmAlgorithmThread
    algo.execute();
    int rank = groupalgo.getRank();
    // Validate the result
    for(int idxLevel = 2 ; idxLevel < groupedTree.getHeight() ; ++idxLevel){
        for(int idxGroup = 0 ; idxGroup < groupedTree.getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
            if(groupalgo.isDataOwned(idxGroup, groupedTree.getNbCellGroupAtLevel(idxLevel))){
                GroupOctreeClass::CellGroupClass* currentCells = groupedTree.getCellGroup(idxLevel, idxGroup);
                currentCells->forEachCell([&](GroupCellClass gcell){
                    const CellClass* cell = tree.getCell(gcell.getMortonIndex(), idxLevel);
                    if(cell == nullptr){
                        std::cout << "[Empty node(" << rank << ")] Error cell should not exist " << gcell.getMortonIndex() << "\n";
                    }
                    else {
                        if(gcell.getDataUp() != cell->getDataUp()){
                            std::cout << "[Up node(" << rank << ")] Up is different at index " << gcell.getMortonIndex() << " level " << idxLevel << " is " << gcell.getDataUp() << " should be " << cell->getDataUp() << "\n";
                        }
                        if(gcell.getDataDown() != cell->getDataDown()){
                            std::cout << "[Down node(" << rank << ")] Down is different at index " << gcell.getMortonIndex() << " level " << idxLevel << " is " << gcell.getDataDown() << " should be " << cell->getDataDown() << "\n";
                        }
                    }
                });
            }
        }
    }
    {
        int idxLevel = groupedTree.getHeight()-1;
        for(int idxGroup = 0 ; idxGroup < groupedTree.getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
            if(groupalgo.isDataOwned(idxGroup, groupedTree.getNbCellGroupAtLevel(idxLevel))){
                GroupOctreeClass::ParticleGroupClass* particleGroup = groupedTree.getParticleGroup(idxGroup);
                GroupOctreeClass::CellGroupClass* cellGroup = groupedTree.getCellGroup(idxLevel, idxGroup);
                cellGroup->forEachCell([&](GroupCellClass cell){
                    MortonIndex midx = cell.getMortonIndex();
                    const int leafIdx = particleGroup->getLeafIndex(midx);
                    GroupContainerClass leaf = particleGroup->template getLeaf<GroupContainerClass>(leafIdx);
                    const FSize nbPartsInLeaf = leaf.getNbParticles();
                    if(cell.getDataUp() != nbPartsInLeaf){
                        std::cout << "[P2M node(" << rank << ")] Error a Cell has " << cell.getDataUp() << " (it should be " << nbPartsInLeaf << ")\n";
                    }
                    const long long int* dataDown = leaf.getDataDown();
                    for(FSize idxPart = 0 ; idxPart < nbPartsInLeaf ; ++idxPart){
                        if(dataDown[idxPart] != loader.getNumberOfParticles()-1){
                            std::cout << "[Full node(" << rank << ")] Error a particle has " << dataDown[idxPart] << " (it should be " << (loader.getNumberOfParticles()-1) << ") at index " << cell.getMortonIndex() << "\n";
                        }
                    }
                });
            }
        }
    }
    return 0;
}
#+TITLE: Implicit use of MPI in the ScalFmm algorithm using StarPU
#+AUTHOR: Martin Khannouz
#+LANGUAGE: en
#+STARTUP: inlineimages
#+OPTIONS: H:3 num:t toc:t \n:nil @:t ::t |:t ^:nil -:t f:t *:t <:t
#+OPTIONS: TeX:t LaTeX:t skip:nil d:nil todo:nil pri:nil tags:not-in-toc
#+EXPORT_SELECT_TAGS: export
#+EXPORT_EXCLUDE_TAGS: noexport
#+TAGS: noexport(n)
* Implicit MPI in ScalFmm using StarPU
** Installation
*** Requirement
#+BEGIN_SRC sh :exports code :eval never
sudo pacman -S openmpi fftw blas lapack
#+END_SRC
*** StarPU
#+BEGIN_SRC sh :exports code :eval never
svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk StarPU
cd StarPU
./autogen.sh
mkdir install
./configure --prefix=$HOME/StarPU/install
make -j `nproc`
export STARPU_DIR=$HOME/StarPU/install
export PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
export LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
cd
#+END_SRC
*** ScalFmm
#+BEGIN_SRC sh :exports code :eval never
git clone git+ssh://mkhannou@scm.gforge.inria.fr/gitroot/scalfmm/scalfmm.git
cd scalfmm/Build
ccmake ..
#+END_SRC
Turn on SCALFMM_USE_MPI, SCALFMM_USE_STARPU and SCALFMM_BUILD_EXAMPLES.
Then generate the Makefile by pressing 'c' then 'g'.
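Alternatively, assuming the options keep exactly these names, the same configuration can be applied non-interactively:
#+BEGIN_SRC sh :exports code :eval never
cmake .. -DSCALFMM_USE_MPI=ON -DSCALFMM_USE_STARPU=ON -DSCALFMM_BUILD_EXAMPLES=ON
make -j `nproc`
#+END_SRC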
To check that everything looks good, execute the following command.
#+BEGIN_SRC sh :exports code :eval never
./Tests/Release/testBlockedAlgorithm
#+END_SRC
Some information about the group tree should be printed, or a core dump if something went wrong.
** First Try
First, copy the original files.
#+BEGIN_SRC sh :exports code :eval never
cp Tests/GroupTree/testBlockedAlgorithm.cpp Tests/GroupTree/testBlockedImplicitAlgorithm.cpp
cp Src/GroupTree/Core/FGroupTaskStarpuAlgorithm.hpp Src/GroupTree/Core/FGroupTaskStarpuImplicitAlgorithm.hpp
#+END_SRC
Edit testBlockedImplicitAlgorithm.cpp: replace FGroupTaskStarPUAlgorithm with FGroupTaskStarPUImplicitAlgorithm and change the include so that FGroupTaskStarpuImplicitAlgorithm.hpp is included instead of FGroupTaskStarpuAlgorithm.hpp.
Edit FGroupTaskStarpuImplicitAlgorithm.hpp: replace FGroupTaskStarPUAlgorithm with FGroupTaskStarPUImplicitAlgorithm and add, somewhere:
#+BEGIN_SRC C
#include <starpu_mpi.h>
#+END_SRC
As MPI needs to be initialized, a call to starpu_mpi_init has to be made:
#+BEGIN_SRC C
FAssertLF(starpu_mpi_init ( 0, 0, 1 ) == 0);
#+END_SRC
The first and second 0 are there because we don't need to pass arguments to MPI.
The 1 tells StarPU to call MPI_Init itself.
Just as every malloc needs a free, every starpu_mpi_init needs a starpu_mpi_shutdown, which is placed in the destructor, before the call to starpu_shutdown.
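As a minimal sketch of that pairing (assuming the destructor otherwise only shuts StarPU down):
#+BEGIN_SRC C
~FGroupTaskStarPUImplicitAlgorithm(){
    // Release the MPI layer first...
    starpu_mpi_shutdown();
    // ...then shut StarPU itself down.
    starpu_shutdown();
}
#+END_SRC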
To check that everything goes right, the following code was added right after starpu_mpi_init; it should print the rank of the current node and the number of nodes.
#+BEGIN_SRC C
int rank, nbProc;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&nbProc);
cout << rank << "/" << nbProc << endl;
#+END_SRC
To run on multiple nodes (on my computer), I use mpirun as follows.
#+BEGIN_SRC sh :exports code :eval never
mpirun -n 2 Tests/Release/testBlockedImplicitAlgorithm
#+END_SRC
Somewhere in the output of mpirun, the following two lines should appear.
#+BEGIN_EXAMPLE
0/2
1/2
#+END_EXAMPLE
** Let's MPIse a little bit
*** Simple start
First, every starpu_insert_task (or starpu_task_insert) must be replaced by starpu_mpi_insert_task.
So a call like
#+BEGIN_SRC C
starpu_insert_task(&some_codelet ...
#+END_SRC
becomes
#+BEGIN_SRC C
starpu_mpi_insert_task(MPI_COMM_WORLD, &some_codelet ...
#+END_SRC
After a quick run, it turns out that starpu_mpi doesn't like unregistered data, so we must add some starpu_mpi_data_register calls in the buildHandles function.
For each handle registered in starpu, I added
#+BEGIN_SRC C
starpu_mpi_data_register(corresponding_handle, tag++, 0);
#+END_SRC
with tag initialized to 0.
The tag just has to be unique, and all handles are registered in the same function, so I used it this way.
I tried to use starpu_mpi_data_set_rank, but for some obscure reason it couldn't register more than one handle (even when the handles were different).
So from here we have all the data registered on node 0 and all the tasks known to starpu_mpi.
If I run the code, node 1 reports multiple errors because its octree doesn't contain the values it should.
This is quite understandable because, according to the StarPU documentation (and according to Marc), each task is executed where its data resides.
And because all the data is registered on node 0, no wonder node 1 doesn't have any valid data.
Because there are errors in the syncData function, I disabled it for node 1; in the future each node will have to acquire and release only the handles it owns.
If we don't want any errors on node 1 either, we should register the data handles with the MPI rank instead of 0. Each node will then execute the whole algorithm without exchanging data with the others and will end up with a consistent octree.
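Concretely, using the same placeholder handle as above and the rank obtained from MPI_Comm_rank, the registration would become:
#+BEGIN_SRC C
starpu_mpi_data_register(corresponding_handle, tag++, mpi_rank);
#+END_SRC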
*** Complicated continuation
Let's see if starpu_mpi can handle many nodes working together.
Thanks to the macro STARPU_EXECUTE_ON_NODE, a task can be attached to a specific node.
So if every inserted task is executed on node 1 while all the data is registered on node 0, StarPU will have to exchange data somehow.
So I added
#+BEGIN_SRC C
STARPU_EXECUTE_ON_NODE, 1
#+END_SRC
to each starpu_mpi_insert_task call.
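Put together with the earlier placeholder some_codelet, an insertion then looks like this (a sketch; the actual handle arguments are elided):
#+BEGIN_SRC C
starpu_mpi_insert_task(MPI_COMM_WORLD, &some_codelet,
                       STARPU_EXECUTE_ON_NODE, 1,
                       /* ...the usual data handles and access modes... */
                       0);
#+END_SRC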
When I ran it, everything seemed right; but to make sure that node 1 was executing everything, I prevented node 1 from creating the DAG (directed acyclic graph).
It turned out that node 0 then blocked in starpu_task_wait_for_all at the end of executeCore, which means that all the tasks were really executed on node 1, so StarPU seems good at exchanging data.
** Distributed FMM with StarPU
*** First Data Mapping
To distribute data correctly among the nodes, the algorithm needs a function that returns the MPI node on which a block of the grouped tree will be registered, given its location in the tree.
The following method was used to distribute the data.
#+BEGIN_SRC C
int dataMapping(int idxGroup, int const nbGroup) const {
    int bonusGroup = nbGroup%nproc; //Number of nodes that get one extra group
    int groupPerNode = (int)floor((double)nbGroup / (double)nproc);
    int mpi_node;
    if(idxGroup < bonusGroup*(groupPerNode+1)) { //Group belongs to the bonus nodes
        mpi_node = (idxGroup - (idxGroup%(groupPerNode+1)))/(groupPerNode+1);
    }
    else { //Group belongs to the remaining nodes
        idxGroup -= bonusGroup*(groupPerNode+1); //Remove the bonus groups to simplify the computation
        mpi_node = (idxGroup - (idxGroup%groupPerNode))/groupPerNode; //Find the corresponding node as if there were no bonus nodes
        mpi_node += bonusGroup; //Shift to find the real node
    }
    return mpi_node;
}
#+END_SRC
The choice was to split each level between all the nodes, so each node owns consecutive groups of cells. That's why the only parameters needed were the size of the level and the index within the level.
The bonus nodes are the first nodes, which get one more group to compute because the level size isn't a multiple of the number of nodes.
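As a small sanity check of this formula, here is a standalone sketch (nproc is hardcoded to 4 here, whereas in the class it comes from MPI): mapping 10 groups onto 4 nodes gives nodes 0 and 1 three groups each, and nodes 2 and 3 two each.
#+BEGIN_SRC C
#include <stdio.h>
#include <math.h>

static const int nproc = 4; /* assumed fixed for this example; the class gets it from MPI */

static int dataMapping(int idxGroup, int const nbGroup) {
    int bonusGroup = nbGroup%nproc;
    int groupPerNode = (int)floor((double)nbGroup / (double)nproc);
    int mpi_node;
    if(idxGroup < bonusGroup*(groupPerNode+1)) {
        mpi_node = (idxGroup - (idxGroup%(groupPerNode+1)))/(groupPerNode+1);
    }
    else {
        idxGroup -= bonusGroup*(groupPerNode+1);
        mpi_node = (idxGroup - (idxGroup%groupPerNode))/groupPerNode;
        mpi_node += bonusGroup;
    }
    return mpi_node;
}

int main(void){
    /* prints: groups 0-2 -> 0, 3-5 -> 1, 6-7 -> 2, 8-9 -> 3 */
    for(int idxGroup = 0 ; idxGroup < 10 ; ++idxGroup)
        printf("group %d -> node %d\n", idxGroup, dataMapping(idxGroup, 10));
    return 0;
}
#+END_SRC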
For simpler code, a new public method has been added to check whether a block of the grouped tree is owned.
#+BEGIN_SRC C
bool isDataOwned(int idxGroup, int nbGroup) const {
    return dataMapping(idxGroup, nbGroup) == mpi_rank;
}
#+END_SRC
This function is used when unregistering data, so that only the data owned by the node gets unregistered.
#+BEGIN_SRC C
void cleanHandle(){
    for(int idxLevel = 0 ; idxLevel < tree->getHeight() ; ++idxLevel){
        for(int idxHandle = 0 ; idxHandle < int(cellHandles[idxLevel].size()) ; ++idxHandle){
            if(isDataOwned(idxHandle, int(cellHandles[idxLevel].size())))//Clean only our data handles
            {
                starpu_data_unregister(cellHandles[idxLevel][idxHandle].symb);
                starpu_data_unregister(cellHandles[idxLevel][idxHandle].up);
                starpu_data_unregister(cellHandles[idxLevel][idxHandle].down);
            }
        }
        cellHandles[idxLevel].clear();
    }
    {
        for(int idxHandle = 0 ; idxHandle < int(particleHandles.size()) ; ++idxHandle){
            if(isDataOwned(idxHandle, int(particleHandles.size())))
            {
                starpu_data_unregister(particleHandles[idxHandle].symb);
                starpu_data_unregister(particleHandles[idxHandle].down);
            }
        }
        particleHandles.clear();
    }
}
#+END_SRC
Now let's look at the function that builds the handles.
#+BEGIN_SRC C
void buildHandles(){
    cleanHandle();
    int where, tag = 0;
    for(int idxLevel = 2 ; idxLevel < tree->getHeight() ; ++idxLevel){
        cellHandles[idxLevel].resize(tree->getNbCellGroupAtLevel(idxLevel));
        for(int idxGroup = 0 ; idxGroup < tree->getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
            int registeringNode = dataMapping(idxGroup, tree->getNbCellGroupAtLevel(idxLevel));
            where = (registeringNode == mpi_rank) ? STARPU_MAIN_RAM : -1;
            const CellContainerClass* currentCells = tree->getCellGroup(idxLevel, idxGroup);
            starpu_variable_data_register(&cellHandles[idxLevel][idxGroup].symb, where,
                                          (uintptr_t)currentCells->getRawBuffer(), currentCells->getBufferSizeInByte());
            starpu_variable_data_register(&cellHandles[idxLevel][idxGroup].up, where,
                                          (uintptr_t)currentCells->getRawMultipoleBuffer(), currentCells->getMultipoleBufferSizeInByte());
            starpu_variable_data_register(&cellHandles[idxLevel][idxGroup].down, where,
                                          (uintptr_t)currentCells->getRawLocalBuffer(), currentCells->getLocalBufferSizeInByte());
            starpu_mpi_data_register(cellHandles[idxLevel][idxGroup].symb, tag++, registeringNode);
            starpu_mpi_data_register(cellHandles[idxLevel][idxGroup].up, tag++, registeringNode);
            starpu_mpi_data_register(cellHandles[idxLevel][idxGroup].down, tag++, registeringNode);
            cellHandles[idxLevel][idxGroup].intervalSize = int(currentCells->getNumberOfCellsInBlock());
#ifdef STARPU_SUPPORT_ARBITER
            starpu_data_assign_arbiter(cellHandles[idxLevel][idxGroup].up, arbiterGlobal);
            starpu_data_assign_arbiter(cellHandles[idxLevel][idxGroup].down, arbiterGlobal);
#endif
        }
    }
    {
        particleHandles.resize(tree->getNbParticleGroup());
        for(int idxGroup = 0 ; idxGroup < tree->getNbParticleGroup() ; ++idxGroup){
            int registeringNode = dataMapping(idxGroup, tree->getNbParticleGroup());
            where = (registeringNode == mpi_rank) ? STARPU_MAIN_RAM : -1;
            ParticleGroupClass* containers = tree->getParticleGroup(idxGroup);
            starpu_variable_data_register(&particleHandles[idxGroup].symb, where,
                                          (uintptr_t)containers->getRawBuffer(), containers->getBufferSizeInByte());
            starpu_variable_data_register(&particleHandles[idxGroup].down, where,
                                          (uintptr_t)containers->getRawAttributesBuffer(), containers->getAttributesBufferSizeInByte());
            starpu_mpi_data_register(particleHandles[idxGroup].symb, tag++, registeringNode);
            starpu_mpi_data_register(particleHandles[idxGroup].down, tag++, registeringNode);
#ifdef STARPU_USE_REDUX
            starpu_data_set_reduction_methods(particleHandles[idxGroup].down, &p2p_redux_perform,
                                              &p2p_redux_init);
#else
#ifdef STARPU_SUPPORT_ARBITER
            starpu_data_assign_arbiter(particleHandles[idxGroup].down, arbiterGlobal);
#endif // STARPU_SUPPORT_ARBITER
#endif // STARPU_USE_REDUX
            particleHandles[idxGroup].intervalSize = int(containers->getNumberOfLeavesInBlock());
        }
    }
}
#+END_SRC
There are at least two new things: the variable "where", which tells whether a handle lives in this node's memory or somewhere else, and the variable "registeringNode", which tells on which node the block should be registered.
*** Checking result
Now that I have a beautiful mapping function, how on earth am I supposed to check whether the result is correct?
To answer this question, I need to clarify a little how StarPU MPI works (from what I understood of the documentation).
When a node registers a piece of data, every time a task writes that data somewhere else, the fresh data should be written back to the node that registered it.
So each node should have up-to-date values in its own cells.
To check the result I used the following algorithm.
- Load the particles and create two trees (a classic tree and a grouped one)
- Execute both the distributed and the classic algorithm
- For each cell and leaf that I own in the grouped tree
  * get the corresponding cell in the classic tree
  * compare the values
  * call the checking function created by Bérenger
For those who prefer the C code, here it is.
#+BEGIN_SRC C
for(int idxLevel = 2 ; idxLevel < groupedTree.getHeight() ; ++idxLevel){
    for(int idxGroup = 0 ; idxGroup < groupedTree.getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
        if(groupalgo.isDataOwned(idxGroup, groupedTree.getNbCellGroupAtLevel(idxLevel))){
            GroupOctreeClass::CellGroupClass* currentCells = groupedTree.getCellGroup(idxLevel, idxGroup);
            currentCells->forEachCell([&](GroupCellClass gcell){
                const CellClass* cell = tree.getCell(gcell.getMortonIndex(), idxLevel);
                if(cell == nullptr){
                    std::cout << "[Empty node(" << rank << ")] Error cell should not exist " << gcell.getMortonIndex() << "\n";
                }
                else {
                    if(gcell.getDataUp() != cell->getDataUp()){
                        std::cout << "[Up node(" << rank << ")] Up is different at index " << gcell.getMortonIndex() << " level " << idxLevel << " is " << gcell.getDataUp() << " should be " << cell->getDataUp() << "\n";
                    }
                    if(gcell.getDataDown() != cell->getDataDown()){
                        std::cout << "[Down node(" << rank << ")] Down is different at index " << gcell.getMortonIndex() << " level " << idxLevel << " is " << gcell.getDataDown() << " should be " << cell->getDataDown() << "\n";
                    }
                }
            });
        }
    }
}
{
    int idxLevel = groupedTree.getHeight()-1;
    for(int idxGroup = 0 ; idxGroup < groupedTree.getNbCellGroupAtLevel(idxLevel) ; ++idxGroup){
        if(groupalgo.isDataOwned(idxGroup, groupedTree.getNbCellGroupAtLevel(idxLevel))){
            GroupOctreeClass::ParticleGroupClass* particleGroup = groupedTree.getParticleGroup(idxGroup);
            GroupOctreeClass::CellGroupClass* cellGroup = groupedTree.getCellGroup(idxLevel, idxGroup);
            cellGroup->forEachCell([&](GroupCellClass cell){
                MortonIndex midx = cell.getMortonIndex();
                const int leafIdx = particleGroup->getLeafIndex(midx);
                GroupContainerClass leaf = particleGroup->template getLeaf<GroupContainerClass>(leafIdx);
                const FSize nbPartsInLeaf = leaf.getNbParticles();
                if(cell.getDataUp() != nbPartsInLeaf){
                    std::cout << "[P2M node(" << rank << ")] Error a Cell has " << cell.getDataUp() << " (it should be " << nbPartsInLeaf << ")\n";
                }
                const long long int* dataDown = leaf.getDataDown();
                for(FSize idxPart = 0 ; idxPart < nbPartsInLeaf ; ++idxPart){
                    if(dataDown[idxPart] != loader.getNumberOfParticles()-1){
                        std::cout << "[Full node(" << rank << ")] Error a particle has " << dataDown[idxPart] << " (it should be " << (loader.getNumberOfParticles()-1) << ") at index " << cell.getMortonIndex() << "\n";
                    }
                }
            });
        }
    }
}
#+END_SRC