
Commit 581b1f01 authored by berenger-bramas

Clean the code, add comments, and create a file to manage the parallel tree construction.

git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/scalfmm/scalfmm/trunk@168 2616d619-271b-44dc-8df4-d4a8f33a7222
parent 98d75ccb
@@ -466,6 +466,9 @@ public:
 FDEBUG(FTic prepareCounter);
 FDEBUG(FTic gatherCounter);
+//////////////////////////////////////////////////////////////////
+// First know what to send to who
+//////////////////////////////////////////////////////////////////
 // pointer to send
 typename OctreeClass::Iterator* toSend[nbProcess * OctreeHeight];
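The hunk above introduces the block that works out what must be sent to which process: toSend holds one slot per (destination process, level) pair. The short sketch below is not part of the diff; the names, values, and the level-major indexing order are assumptions for illustration only.

#include <vector>

int main() {
    const int nbProcess    = 4;   // assumed process count
    const int OctreeHeight = 7;   // assumed tree height

    // One counter per (destination process, level) pair, mirroring the
    // flat toSend[nbProcess * OctreeHeight] array declared in the diff.
    // The level-major indexing used here is an assumption.
    std::vector<int> cellsQueued(nbProcess * OctreeHeight, 0);

    const int idxProc = 2, idxLevel = 3;
    cellsQueued[idxLevel * nbProcess + idxProc] += 1;  // queue one cell for rank 2 at level 3

    return 0;
}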
@@ -560,6 +563,9 @@ public:
 }
+//////////////////////////////////////////////////////////////////
+// Gather this information
+//////////////////////////////////////////////////////////////////
 FDEBUG(gatherCounter.tic());
 // All process say to each others
@@ -569,6 +575,11 @@ public:
 mpiassert( MPI_Allgather( indexToSend, nbProcess * OctreeHeight, MPI_INT, globalReceiveMap, nbProcess * OctreeHeight, MPI_INT, MPI_COMM_WORLD), __LINE__ );
 FDEBUG(gatherCounter.tac());
+//////////////////////////////////////////////////////////////////
+// Send and receive for real
+//////////////////////////////////////////////////////////////////
 FDEBUG(sendCounter.tic());
 // Then they can send and receive (because they know what they will receive)
 // To send in asynchrone way
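The MPI_Allgather in the hunk above is how every rank learns, before any point-to-point transfer, how much it will receive from everyone else. Below is a minimal, self-contained sketch of that step (not ScalFMM code); the tree height and the internal layout of indexToSend are assumptions.

#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int nbProcess = 0, idProcess = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nbProcess);
    MPI_Comm_rank(MPI_COMM_WORLD, &idProcess);

    const int OctreeHeight = 7;   // assumed tree height
    std::vector<int> indexToSend(nbProcess * OctreeHeight, 0);
    std::vector<int> globalReceiveMap(nbProcess * nbProcess * OctreeHeight, 0);

    // ... fill indexToSend with one count per (destination process, level)
    //     while scanning the local tree (layout assumed for this sketch) ...

    // Every rank contributes its block; everyone gets the full map.
    MPI_Allgather(indexToSend.data(), nbProcess * OctreeHeight, MPI_INT,
                  globalReceiveMap.data(), nbProcess * OctreeHeight, MPI_INT,
                  MPI_COMM_WORLD);

    // Rank r's block now sits at offset r * nbProcess * OctreeHeight, so this
    // rank can read how much every other rank will send it before posting
    // the matching receives.

    MPI_Finalize();
    return 0;
}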
@@ -608,6 +619,10 @@ public:
 }
 FDEBUG(sendCounter.tac());
+//////////////////////////////////////////////////////////////////
+// Do M2L
+//////////////////////////////////////////////////////////////////
 {
 typename OctreeClass::Iterator octreeIterator(tree);
 octreeIterator.moveDown();
@@ -643,6 +658,9 @@ public:
 }
 }
+//////////////////////////////////////////////////////////////////
+// Wait received data and compute
+//////////////////////////////////////////////////////////////////
 // Wait to receive every things (and send every things)
 MPI_Waitall(iterRequest, requests, 0);
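The two comment blocks added here ("Do M2L" and "Wait received data and compute") mark the overlap pattern: the non-blocking sends and receives posted earlier stay in flight while the purely local M2L work runs, and MPI_Waitall is only called before touching data from other ranks. A minimal sketch of that pattern, with placeholder buffers and work functions rather than the real octree code:

#include <mpi.h>
#include <vector>

void computeLocalM2L()  { /* cells that only need local data */ }
void computeRemoteM2L() { /* cells that needed the received buffers */ }

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int nbProcess = 0, idProcess = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nbProcess);
    MPI_Comm_rank(MPI_COMM_WORLD, &idProcess);

    const int bufferSize = 1024;  // assumed size
    std::vector<std::vector<char>> sendBuf(nbProcess, std::vector<char>(bufferSize));
    std::vector<std::vector<char>> recvBuf(nbProcess, std::vector<char>(bufferSize));
    std::vector<MPI_Request> requests;

    // Post the exchanges with every other rank (tag 0 for illustration).
    for (int idxProc = 0; idxProc < nbProcess; ++idxProc) {
        if (idxProc == idProcess) continue;
        MPI_Request req;
        MPI_Isend(sendBuf[idxProc].data(), bufferSize, MPI_BYTE, idxProc, 0,
                  MPI_COMM_WORLD, &req);
        requests.push_back(req);
        MPI_Irecv(recvBuf[idxProc].data(), bufferSize, MPI_BYTE, idxProc, 0,
                  MPI_COMM_WORLD, &req);
        requests.push_back(req);
    }

    computeLocalM2L();   // overlaps with the transfers in flight

    MPI_Waitall(static_cast<int>(requests.size()), requests.data(),
                MPI_STATUSES_IGNORE);   // all sends and receives completed

    computeRemoteM2L();  // safe to read recvBuf now

    MPI_Finalize();
    return 0;
}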
@@ -675,6 +693,7 @@ public:
 }
 // for each cells
 do{
+// copy cells that need data from others
 if(leafsNeedOther[idxLevel]->get(realCellId++)){
 iterArray[numberOfCells++] = octreeIterator;
 }
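The single line added in this hunk documents why the loop filters on leafsNeedOther: only cells that depend on remote data are copied into iterArray for the post-wait pass. A small sketch of that filtering idea, using std::vector<bool> in place of ScalFMM's per-level flag arrays (types, sizes, and values here are placeholders):

#include <vector>

struct CellRef { int index; };   // stand-in for an octree iterator

int main() {
    const int OctreeHeight  = 7;    // assumed values
    const int cellsPerLevel = 64;

    // leafsNeedOther[idxLevel][cellId] == true -> cell needs remote data.
    std::vector<std::vector<bool>> leafsNeedOther(
        OctreeHeight, std::vector<bool>(cellsPerLevel, false));
    leafsNeedOther[3][10] = true;   // would be marked during the prepare step

    // After the wait: keep only the flagged cells for the second M2L pass.
    const int idxLevel = 3;
    std::vector<CellRef> iterArray;
    for (int realCellId = 0; realCellId < cellsPerLevel; ++realCellId) {
        if (leafsNeedOther[idxLevel][realCellId]) {
            iterArray.push_back(CellRef{realCellId});
        }
    }
    // iterArray now plays the role of the work list processed with the
    // received multipole data.
    return 0;
}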
@@ -725,6 +744,10 @@ public:
 FDEBUG( FDebug::Controller << "\t\t Prepare : " << prepareCounter.cumulated() << " s\n" );
 }
+//////////////////////////////////////////////////////////////////
+// ---------------- L2L ---------------
+//////////////////////////////////////////////////////////////////
 { // second L2L
 FDEBUG( FDebug::Controller.write("\tStart Downward Pass (L2L)\n").write(FDebug::Flush); );
 FDEBUG(FTic counterTime);
@@ -1076,6 +1099,7 @@ public:
 MPI_Waitall(iterRequest, requests, 0);
 FDEBUG(waitCounter.tac());
+// Create an octree with leaves from others
 OctreeClass otherP2Ptree( tree->getHeight(), tree->getSubHeight(), tree->getBoxWidth(), tree->getBoxCenter() );
 for(int idxProc = 0 ; idxProc < nbProcess ; ++idxProc){
 for(int idxPart = 0 ; idxPart < globalReceiveMap[idxProc * nbProcess + idProcess] ; ++idxPart){
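In this last hunk the new comment explains that a temporary octree (otherP2Ptree) is filled with leaves received from other ranks; the loop bound globalReceiveMap[idxProc * nbProcess + idProcess] reads a flattened sender-by-receiver table. A small sketch of that indexing convention, with made-up sizes and counts:

#include <iostream>
#include <vector>

int main() {
    const int nbProcess = 3;
    const int idProcess = 1;   // pretend this is rank 1

    // Flattened nbProcess x nbProcess table: entry [sender * nbProcess + receiver]
    // holds how many particles the receiver gets from that sender.
    std::vector<int> globalReceiveMap(nbProcess * nbProcess, 0);
    globalReceiveMap[0 * nbProcess + 1] = 5;   // rank 0 sends this rank 5 particles
    globalReceiveMap[2 * nbProcess + 1] = 2;   // rank 2 sends this rank 2 particles

    for (int idxProc = 0; idxProc < nbProcess; ++idxProc) {
        const int nbToReceive = globalReceiveMap[idxProc * nbProcess + idProcess];
        for (int idxPart = 0; idxPart < nbToReceive; ++idxPart) {
            // ... insert the received particle into the temporary octree built
            //     only from other ranks' leaves (otherP2Ptree in the diff) ...
        }
        std::cout << "from rank " << idxProc << ": " << nbToReceive << " particles\n";
    }
    return 0;
}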
[The remainder of the diff is collapsed.]