
Commit 794015bd authored by Berenger Bramas

debug mpi from tinker branch feedback (it does not include the tinker part)

parent f2533aae
@@ -164,6 +164,8 @@ int main(int argc, char* argv[])
tree.getBoxWidth(),
tree.getHeight(), &finalParticles,&balancer);
std::cout << "Local nb particles after sort "
<< finalParticles.getSize() << std::endl;
for(FSize idx = 0 ; idx < finalParticles.getSize(); ++idx){
tree.insert(finalParticles[idx].position,finalParticles[idx].indexInFile,finalParticles[idx].physicalValue);
}
......
@@ -4,6 +4,9 @@ the more restrictive has to be used.
ScalFmm est régi par la licence CeCILL-C & LGPL, en cas de conflit
la plus restrictive prime.
Folders under Addons might have separate Licence, in such case
one can find a dedicated Licence file where appropriate.
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
......
@@ -81,7 +81,10 @@ public:
pack.elementTo = Min(allObjectives[idxProc].second , myCurrentInterval.second) - myCurrentInterval.first;
// Next time give from the previous end
currentElement = pack.elementTo;
packToSend.push_back(pack);
if(pack.elementTo - pack.elementFrom != 0){
packToSend.push_back(pack);
}
// Progress
idxProc += 1;
}
......
// ===================================================================================
// Copyright ScalFmm 2011 INRIA, Olivier Coulaud, Bérenger Bramas, Matthias Messner
// olivier.coulaud@inria.fr, berenger.bramas@inria.fr
// This software is a computer program whose purpose is to compute the FMM.
//
// This software is governed by the CeCILL-C and LGPL licenses and
// abiding by the rules of distribution of free software.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public and CeCILL-C Licenses for more details.
// "http://www.cecill.info".
// "http://www.gnu.org/licenses".
// ===================================================================================
#ifndef FPARTITIONSMAPPING_HPP
#define FPARTITIONSMAPPING_HPP
#include "Utils/FGlobal.hpp"
#include "Utils/FMpi.hpp"
#include "Containers/FVector.hpp"
#include "FLeafBalance.hpp"
#include "Files/FMpiTreeBuilder.hpp"
template <class FReal>
class FPartitionsMapping {
protected:
FMpi::FComm comm;
//! The number of particles from the initial decomposition
FSize nbParticlesInitial;
//! The number of particles from the scalfmm decomposition
FSize nbParticlesWorking;
std::unique_ptr<FSize[]> nbParticlesSentToOthers;
std::unique_ptr<FSize[]> offsetNbParticlesSentToOthers;
std::unique_ptr<FSize[]> nbParticlesRecvFromOthers;
std::unique_ptr<FSize[]> offsetNbParticlesRecvFromOthers;
std::unique_ptr<FSize[]> mappingToOthers;
public:
FPartitionsMapping(const FMpi::FComm& inComm)
: comm(inComm), nbParticlesInitial(0), nbParticlesWorking(0) {
}
void setComm(const FMpi::FComm& inComm){
comm = inComm;
}
template< int NbPhysicalValuesPerPart>
struct TestParticle{
FPoint<FReal> position;
std::array<FReal, NbPhysicalValuesPerPart> physicalValues;
FSize localIndex;
int initialProcOwner;
const FPoint<FReal>& getPosition() const {
return position;
}
};
template< int NbPhysicalValuesPerPart, class FillerClass>
FVector<TestParticle<NbPhysicalValuesPerPart>> distributeParticles(const FSize inNbParticles,
const FPoint<FReal>& centerOfBox, const FReal boxWidth,
const int TreeHeight, FillerClass filler){
nbParticlesInitial = inNbParticles;
////////////////////////////////////////////////////////
std::unique_ptr<TestParticle<NbPhysicalValuesPerPart>[]> initialParticles(new TestParticle<NbPhysicalValuesPerPart>[inNbParticles]);
// Create the array to distribute
for(FSize idxPart = 0 ; idxPart < nbParticlesInitial ; ++idxPart){
filler(idxPart, &initialParticles[idxPart].position, &initialParticles[idxPart].physicalValues);
initialParticles[idxPart].localIndex = idxPart;
initialParticles[idxPart].initialProcOwner = comm.processId();
}
FVector<TestParticle<NbPhysicalValuesPerPart>> finalParticles;
FLeafBalance balancer;
FMpiTreeBuilder< FReal,TestParticle<NbPhysicalValuesPerPart> >::DistributeArrayToContainer(comm,initialParticles.get(),
nbParticlesInitial,
centerOfBox,
boxWidth,
TreeHeight,
&finalParticles, &balancer);
FQuickSort<TestParticle<NbPhysicalValuesPerPart>,FSize>::QsOmp(finalParticles.data(), finalParticles.getSize(),
[](const TestParticle<NbPhysicalValuesPerPart>& p1,
const TestParticle<NbPhysicalValuesPerPart>& p2){
return p1.initialProcOwner < p2.initialProcOwner
|| (p1.initialProcOwner == p2.initialProcOwner
&& p1.localIndex < p2.localIndex);
});
////////////////////////////////////////////////////////
nbParticlesWorking = finalParticles.getSize();
nbParticlesRecvFromOthers.reset(new FSize[comm.processCount()]);
memset(nbParticlesRecvFromOthers.get(), 0 , sizeof(FSize)*comm.processCount());
for(FSize idxPart = 0 ; idxPart < finalParticles.getSize() ; ++idxPart){
// Count the particles received from each proc
nbParticlesRecvFromOthers[finalParticles[idxPart].initialProcOwner] += 1;
}
offsetNbParticlesRecvFromOthers.reset(new FSize[comm.processCount()+1]);
offsetNbParticlesRecvFromOthers[0] = 0;
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
offsetNbParticlesRecvFromOthers[idxProc+1] = offsetNbParticlesRecvFromOthers[idxProc]
+ nbParticlesRecvFromOthers[idxProc];
}
////////////////////////////////////////////////////////
std::unique_ptr<FSize[]> nbParticlesRecvFromOthersAllAll(new FSize[comm.processCount()*comm.processCount()]);
// Exchange how many each proc receives from another
FMpi::MpiAssert( MPI_Allgather( nbParticlesRecvFromOthers.get(), comm.processCount(), FMpi::GetType(FSize()),
nbParticlesRecvFromOthersAllAll.get(), comm.processCount(),
FMpi::GetType(FSize()), comm.getComm()), __LINE__ );
////////////////////////////////////////////////////////
nbParticlesSentToOthers.reset(new FSize[comm.processCount()]);
FSize checkerSent = 0;
offsetNbParticlesSentToOthers.reset(new FSize[comm.processCount()+1]);
offsetNbParticlesSentToOthers[0] = 0;
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
nbParticlesSentToOthers[idxProc] = nbParticlesRecvFromOthersAllAll[comm.processCount()*idxProc + comm.processId()];
checkerSent += nbParticlesSentToOthers[idxProc];
offsetNbParticlesSentToOthers[idxProc+1] = offsetNbParticlesSentToOthers[idxProc]
+ nbParticlesSentToOthers[idxProc];
}
// I must have sent what I owned at the beginning
FAssertLF(checkerSent == nbParticlesInitial);
////////////////////////////////////////////////////////
std::unique_ptr<FSize[]> localIdsRecvOrdered(new FSize[nbParticlesWorking]);
// We list the local ids in the order of the originating procs
for(FSize idxPart = 0 ; idxPart < finalParticles.getSize() ; ++idxPart){
const int procOwner = finalParticles[idxPart].initialProcOwner;
localIdsRecvOrdered[idxPart] = finalParticles[idxPart].localIndex;
FAssertLF(offsetNbParticlesRecvFromOthers[procOwner] <= idxPart
&& idxPart < offsetNbParticlesRecvFromOthers[procOwner+1]);
}
////////////////////////////////////////////////////////
std::unique_ptr<FSize[]> localIdsSendOrdered(new FSize[nbParticlesInitial]);
std::unique_ptr<MPI_Request[]> requests(new MPI_Request[comm.processCount()*2]);
int iterRequest = 0;
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
if(idxProc == comm.processId()){
FAssertLF(nbParticlesRecvFromOthers[idxProc] == nbParticlesSentToOthers[idxProc]);
memcpy(&localIdsSendOrdered[offsetNbParticlesSentToOthers[idxProc]],
&localIdsRecvOrdered[offsetNbParticlesRecvFromOthers[idxProc]],
sizeof(FSize)*nbParticlesRecvFromOthers[idxProc]);
}
else{
const FSize nbRecvFromProc = nbParticlesRecvFromOthers[idxProc];
if(nbRecvFromProc){
FMpi::MpiAssert( MPI_Isend(&localIdsRecvOrdered[offsetNbParticlesRecvFromOthers[idxProc]],
int(nbRecvFromProc),
FMpi::GetType(FSize()), idxProc,
99, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
const FSize nbSendToProc = nbParticlesSentToOthers[idxProc];
if(nbSendToProc){
FMpi::MpiAssert( MPI_Irecv(&localIdsSendOrdered[offsetNbParticlesSentToOthers[idxProc]],
int(nbSendToProc),
FMpi::GetType(FSize()), idxProc,
99, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
}
}
FMpi::MpiAssert( MPI_Waitall( iterRequest, requests.get(), MPI_STATUSES_IGNORE), __LINE__ );
////////////////////////////////////////////////////////
mappingToOthers.reset(new FSize[nbParticlesInitial]);
for(FSize idxPart = 0; idxPart < nbParticlesInitial ; ++idxPart){
mappingToOthers[localIdsSendOrdered[idxPart]] = idxPart;
}
return std::move(finalParticles);
}
////////////////////////////////////////////////////////
// physicalValues must be of size nbParticlesInitial
template< int NbPhysicalValuesPerPart>
std::unique_ptr<std::array<FReal, NbPhysicalValuesPerPart>[]> distributeData(
const std::array<FReal, NbPhysicalValuesPerPart> physicalValues[]){
std::unique_ptr<std::array<FReal, NbPhysicalValuesPerPart>[]> physicalValuesRorder(
new std::array<FReal, NbPhysicalValuesPerPart>[nbParticlesInitial]);
for(FSize idxPart = 0; idxPart < nbParticlesInitial ; ++idxPart){
physicalValuesRorder[mappingToOthers[idxPart]] = physicalValues[idxPart];
}
// Allocate the array to store the physical values of my working interval
std::unique_ptr<std::array<FReal, NbPhysicalValuesPerPart>[]> recvPhysicalValues(new std::array<FReal, NbPhysicalValuesPerPart>[nbParticlesWorking]);
std::unique_ptr<MPI_Request[]> requests(new MPI_Request[comm.processCount()*2]);
int iterRequest = 0;
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
if(idxProc == comm.processId()){
FAssertLF(nbParticlesRecvFromOthers[idxProc] == nbParticlesSentToOthers[idxProc]);
memcpy(&recvPhysicalValues[offsetNbParticlesRecvFromOthers[idxProc]],
&physicalValuesRorder[offsetNbParticlesSentToOthers[idxProc]],
sizeof(std::array<FReal, NbPhysicalValuesPerPart>)*nbParticlesRecvFromOthers[idxProc]);
}
else{
const FSize nbSendToProc = nbParticlesSentToOthers[idxProc];
if(nbSendToProc){
FMpi::MpiAssert( MPI_Isend(
const_cast<std::array<FReal, NbPhysicalValuesPerPart>*>(&physicalValuesRorder[offsetNbParticlesSentToOthers[idxProc]]),
int(nbSendToProc*sizeof(std::array<FReal, NbPhysicalValuesPerPart>)),
MPI_BYTE, idxProc,
2222, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
const FSize nbRecvFromProc = nbParticlesRecvFromOthers[idxProc];
if(nbRecvFromProc){
FMpi::MpiAssert( MPI_Irecv(
(void*)&recvPhysicalValues[offsetNbParticlesRecvFromOthers[idxProc]],
int(nbRecvFromProc*sizeof(std::array<FReal, NbPhysicalValuesPerPart>)),
MPI_BYTE, idxProc,
2222, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
}
}
FMpi::MpiAssert( MPI_Waitall( iterRequest, requests.get(), MPI_STATUSES_IGNORE), __LINE__ );
return std::move(recvPhysicalValues);
}
////////////////////////////////////////////////////////
// resValues must be of size nbParticlesWorking
template< int NbResValuesPerPart>
std::unique_ptr<std::array<FReal, NbResValuesPerPart>[]> getResultingData(
const std::array<FReal, NbResValuesPerPart> resValues[]){
// First allocate the array to store the result
std::unique_ptr<std::array<FReal, NbResValuesPerPart>[]> recvPhysicalValues(
new std::array<FReal, NbResValuesPerPart>[nbParticlesInitial]);
std::unique_ptr<MPI_Request[]> requests(new MPI_Request[comm.processCount()*2]);
int iterRequest = 0;
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
if(idxProc == comm.processId()){
FAssertLF(nbParticlesRecvFromOthers[idxProc] == nbParticlesSentToOthers[idxProc]);
memcpy(&recvPhysicalValues[offsetNbParticlesSentToOthers[idxProc]],
&resValues[offsetNbParticlesRecvFromOthers[idxProc]],
sizeof(std::array<FReal, NbResValuesPerPart>)*nbParticlesRecvFromOthers[idxProc]);
}
else{
// I originally received nbRecvFromProc, so I should
// send nbRecvFromProc back to the real owner
const FSize nbRecvFromProc = nbParticlesRecvFromOthers[idxProc];
if(nbRecvFromProc){
FMpi::MpiAssert( MPI_Isend(
const_cast<std::array<FReal, NbResValuesPerPart>*>(&resValues[offsetNbParticlesRecvFromOthers[idxProc]]),
int(nbRecvFromProc*sizeof(std::array<FReal, NbResValuesPerPart>)),
MPI_BYTE, idxProc,
1111, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
// I sent nbSendToProc to idxProc,
// so I should receive nbSendToProc in my interval
const FSize nbSendToProc = nbParticlesSentToOthers[idxProc];
if(nbSendToProc){
FMpi::MpiAssert( MPI_Irecv(
&recvPhysicalValues[offsetNbParticlesSentToOthers[idxProc]],
int(nbSendToProc*sizeof(std::array<FReal, NbResValuesPerPart>)),
MPI_BYTE, idxProc,
1111, comm.getComm(), &requests[iterRequest++]), __LINE__ );
}
}
}
FMpi::MpiAssert( MPI_Waitall( iterRequest, requests.get(), MPI_STATUSES_IGNORE), __LINE__ );
std::unique_ptr<std::array<FReal, NbResValuesPerPart>[]> recvPhysicalValuesOrder(
new std::array<FReal, NbResValuesPerPart>[nbParticlesInitial]);
for(FSize idxPart = 0; idxPart < nbParticlesInitial ; ++idxPart){
recvPhysicalValuesOrder[idxPart] = recvPhysicalValues[mappingToOthers[idxPart]];
}
return std::move(recvPhysicalValuesOrder);
}
////////////////////////////////////////////////////////
FSize getNbParticlesWorking() const{
return nbParticlesWorking;
}
FSize getMappingResultToLocal(const FSize inIdx) const{
return mappingToOthers[inIdx];
}
};
#endif
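For reference, here is a minimal usage sketch of the new FPartitionsMapping class (illustrative only: the function name, include path, particle filler and the computation step in between are assumptions, not part of this commit):

#include "FPartitionsMapping.hpp" // assumed include path for the new header
#include <memory>
void exampleRoundTrip(const FMpi::FComm& comm, const FSize nbLocalParticles,
                      const FPoint<double>& centerOfBox, const double boxWidth, const int treeHeight){
    FPartitionsMapping<double> mapping(comm);
    // 1) Let the library redistribute the locally owned particles (one physical value per particle here)
    auto workingParticles = mapping.distributeParticles<1>(nbLocalParticles, centerOfBox, boxWidth, treeHeight,
        [](const FSize idx, FPoint<double>* pos, std::array<double,1>* vals){
            *pos = FPoint<double>(double(idx), 0., 0.); // application-specific position
            (*vals)[0] = 1.0;                           // application-specific physical value
        });
    // workingParticles now holds this rank's working interval (positions, initial owners, local indexes)
    // 2) Forward per-particle data, given in the initial local order, to the working distribution
    std::unique_ptr<std::array<double,1>[]> initialData(new std::array<double,1>[nbLocalParticles]);
    for(FSize idx = 0 ; idx < nbLocalParticles ; ++idx){ initialData[idx][0] = 0.0; }
    auto workingData = mapping.distributeData<1>(initialData.get());
    // 3) Compute on mapping.getNbParticlesWorking() particles, then bring the per-particle results
    //    back to the order and owners of the initial decomposition
    std::unique_ptr<std::array<double,1>[]> workingResults(new std::array<double,1>[mapping.getNbParticlesWorking()]);
    auto initialOrderResults = mapping.getResultingData<1>(workingResults.get());
}

The mapping records, per process, how many particles were exchanged in each direction and the corresponding local indexes, so distributeData and getResultingData only need point-to-point messages between the processes that actually exchanged particles.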
@@ -1685,6 +1685,10 @@ public:
* @param function
*/
void forEachLeaf(std::function<void(LeafClass*)> function){
if(isEmpty()){
return;
}
Iterator octreeIterator(this);
octreeIterator.gotoBottomLeft();
@@ -1698,6 +1702,10 @@ public:
* @param function
*/
void forEachCell(std::function<void(CellClass*)> function){
if(isEmpty()){
return;
}
Iterator octreeIterator(this);
octreeIterator.gotoBottomLeft();
@@ -1717,6 +1725,10 @@ public:
* @param function
*/
void forEachCellWithLevel(std::function<void(CellClass*,const int)> function){
if(isEmpty()){
return;
}
Iterator octreeIterator(this);
octreeIterator.gotoBottomLeft();
@@ -1736,6 +1748,10 @@ public:
* @param function
*/
void forEachCellLeaf(std::function<void(CellClass*,LeafClass*)> function){
if(isEmpty()){
return;
}
Iterator octreeIterator(this);
octreeIterator.gotoBottomLeft();
......
@@ -162,7 +162,7 @@ public:
// To merge the leaves
//////////////////////////////////////////////////////////////////////////
static void MergeSplitedLeaves(const FMpi::FComm& communicator, IndexedParticle* workingArray, FSize* workingSize,
static void MergeSplitedLeaves(const FMpi::FComm& communicator, IndexedParticle** workingArray, FSize* workingSize,
FSize ** leavesOffsetInParticles, ParticleClass** particlesArrayInLeafOrder, FSize* const leavesSize){
const int myRank = communicator.processId();
const int nbProcs = communicator.processCount();
@@ -171,13 +171,13 @@ public:
{ // Get the information of the leaves
leavesInfo.clear();
if((*workingSize)){
leavesInfo.push({workingArray[0].index, 1, 0});
leavesInfo.push({(*workingArray)[0].index, 1, 0});
for(FSize idxPart = 1 ; idxPart < (*workingSize) ; ++idxPart){
if(leavesInfo.data()[leavesInfo.getSize()-1].mindex == workingArray[idxPart].index){
if(leavesInfo.data()[leavesInfo.getSize()-1].mindex == (*workingArray)[idxPart].index){
leavesInfo.data()[leavesInfo.getSize()-1].nbParts += 1;
}
else{
leavesInfo.push({workingArray[idxPart].index, 1, idxPart});
leavesInfo.push({(*workingArray)[idxPart].index, 1, idxPart});
}
}
}
@@ -209,12 +209,16 @@ public:
while(0 < idProcToSendTo &&
(allProcFirstLeafStates[(idProcToSendTo-1)*2 + 1].mindex == borderLeavesState[0].mindex
|| allProcFirstLeafStates[(idProcToSendTo-1)*2 + 1].mindex == noDataFlag)){
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] idProcToSendTo "
<< idProcToSendTo << " allProcFirstLeafStates[(idProcToSendTo-1)*2 + 1].mindex " <<
allProcFirstLeafStates[(idProcToSendTo-1)*2 + 1].mindex << " borderLeavesState[0].mindex " <<
borderLeavesState[0].mindex << "\n"; FLog::Controller.flush(); );
idProcToSendTo -= 1;
}
// We found someone
if(idProcToSendTo != myRank && allProcFirstLeafStates[(idProcToSendTo)*2 + 1].mindex == borderLeavesState[0].mindex){
// Post and send message for the first leaf
FMpi::ISendSplit(&workingArray[0], borderLeavesState[0].nbParts, idProcToSendTo,
FMpi::ISendSplit(&(*workingArray)[0], borderLeavesState[0].nbParts, idProcToSendTo,
FMpi::TagExchangeIndexs, communicator, &requests);
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] send " << borderLeavesState[0].nbParts << " to " << idProcToSendTo << "\n"; FLog::Controller.flush(); );
hasSentFirstLeaf = true;
@@ -228,11 +232,18 @@ public:
// Count all the particles of our last leaf that are on other procs
FSize totalNbParticlesToRecv = 0;
int idProcToRecvFrom = myRank;
while(idProcToRecvFrom+1 < nbProcs &&
(borderLeavesState[1].mindex == allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex
|| allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex == noDataFlag)){
idProcToRecvFrom += 1;
totalNbParticlesToRecv += allProcFirstLeafStates[(idProcToRecvFrom)*2].nbParts;
if(!hasSentFirstLeaf || borderLeavesState[0].mindex != borderLeavesState[1].mindex){
while(idProcToRecvFrom+1 < nbProcs &&
(borderLeavesState[1].mindex == allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex
|| allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex == noDataFlag)){
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] idProcToRecvFrom "
<< idProcToRecvFrom << " allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex " <<
allProcFirstLeafStates[(idProcToRecvFrom+1)*2].mindex << " borderLeavesState[1].mindex " <<
borderLeavesState[1].mindex << "\n"; FLog::Controller.flush(); );
idProcToRecvFrom += 1;
totalNbParticlesToRecv += allProcFirstLeafStates[(idProcToRecvFrom)*2].nbParts;
}
}
// If there are some
if(totalNbParticlesToRecv){
@@ -262,7 +273,7 @@ public:
const FSize offsetParticles = borderLeavesState[0].nbParts;
// Move all the particles
for(FSize idxPart = offsetParticles ; idxPart < (*workingSize) ; ++idxPart){
workingArray[idxPart - offsetParticles] = workingArray[idxPart];
(*workingArray)[idxPart - offsetParticles] = (*workingArray)[idxPart];
}
// Move all the leaves
for(int idxLeaf = 1 ; idxLeaf < leavesInfo.getSize() ; ++idxLeaf){
@@ -276,14 +287,16 @@ public:
if(hasExtendLastLeaf){
// Allocate array
const FSize finalParticlesNumber = (*workingSize) + receivedParticles.size();
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] Create array "
<< finalParticlesNumber << " particles\n"; FLog::Controller.flush(); );
IndexedParticle* particlesWithExtension = new IndexedParticle[finalParticlesNumber];
// Copy old data
memcpy(particlesWithExtension, workingArray, (*workingSize)*sizeof(IndexedParticle));
memcpy(particlesWithExtension, (*workingArray), (*workingSize)*sizeof(IndexedParticle));
// Copy received data
memcpy(particlesWithExtension + (*workingSize), receivedParticles.data(), receivedParticles.size()*sizeof(IndexedParticle));
// Move ptr
delete[] workingArray;
workingArray = particlesWithExtension;
delete[] (*workingArray);
(*workingArray) = particlesWithExtension;
(*workingSize) = finalParticlesNumber;
leavesInfo[leavesInfo.getSize()-1].nbParts += receivedParticles.size();
}
@@ -298,7 +311,7 @@ public:
//Copy all the particles
(*particlesArrayInLeafOrder) = new ParticleClass[(*workingSize)];
for(FSize idxPart = 0 ; idxPart < (*workingSize) ; ++idxPart){
memcpy(&(*particlesArrayInLeafOrder)[idxPart],&workingArray[idxPart].particle,sizeof(ParticleClass));
memcpy(&(*particlesArrayInLeafOrder)[idxPart],&(*workingArray)[idxPart].particle,sizeof(ParticleClass));
}
// Assign the number of leaves
(*leavesSize) = leavesInfo.getSize();
@@ -363,8 +376,9 @@ public:
const std::vector<FEqualize::Package> packsToSend = FEqualize::GetPackToSend(myCurrentInter, allObjectives);
FAssertLF((currentNbLeaves == 0 && packsToSend.size() == 0) ||
(packsToSend.size() && FSize(packsToSend[packsToSend.size()-1].elementTo) == currentNbLeaves));
(currentNbLeaves != 0 && packsToSend.size() && FSize(packsToSend[packsToSend.size()-1].elementTo) == currentNbLeaves));
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] Previous currentNbLeaves (" << currentNbLeaves << ")\n"; FLog::Controller.flush(); );
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] Get my interval (" << packsToSend.size() << ")\n"; FLog::Controller.flush(); );
FLOG(if(VerboseLog) FLog::Controller << "SCALFMM-DEBUG [" << communicator.processId() << "] Send data\n"; FLog::Controller.flush(); );
@@ -379,6 +393,8 @@ public:
const FEqualize::Package& pack = packsToSend[idxPack];
if(idxPack != 0) FAssertLF(packsToSend[idxPack].elementFrom == packsToSend[idxPack-1].elementTo);
FAssertLF(FSize(pack.elementTo) <= FSize(currentNbParts));
FAssertLF(pack.elementFrom <= pack.elementTo);
const long long int nbPartsPerPackToSend = leavesOffsetInParticles[pack.elementTo]-leavesOffsetInParticles[pack.elementFrom];
totalSend += nbPartsPerPackToSend;
@@ -388,7 +404,7 @@ public:
<< " from " << pack.elementFrom << " to " << pack.elementTo << " \n"; FLog::Controller.flush(); );
// Send the size of the data
requestsNbParts.emplace_back();
FMpi::MpiAssert(MPI_Isend(&nbPartsPerPackToSend,1,MPI_LONG_LONG_INT,pack.idProc,
FMpi::MpiAssert(MPI_Isend(const_cast<long long int*>(&nbPartsPerPackToSend),1,MPI_LONG_LONG_INT,pack.idProc,
FMpi::TagExchangeIndexs, communicator.getComm(), &requestsNbParts.back()),__LINE__);
}
@@ -552,7 +568,9 @@ public:
// From ParticleClass get array of IndexedParticle sorted
GetSortedParticlesFromArray(communicator, originalParticlesArray, originalNbParticles, sortingType, boxCenter, boxWidth, treeHeight,
&sortedParticlesArray, &nbParticlesInArray);
FLOG( FLog::Controller << "[" << communicator.processId() << "] Particles Distribution: " << "\t GetSortedParticlesFromArray is over (" << timer.tacAndElapsed() << "s)\n"; FLog::Controller.flush(); );
FLOG( FLog::Controller << "[" << communicator.processId() << "] Particles Distribution: "
<< "\t GetSortedParticlesFromArray is over (" << timer.tacAndElapsed() << "s) "
<< nbParticlesInArray << " particles\n"; FLog::Controller.flush(); );
FLOG( timer.tic() );
// for(int idx = 0 ; idx < nbParticlesInArray ; ++idx){
@@ -563,7 +581,7 @@ public:
FSize * leavesOffsetInParticles = nullptr;
FSize nbLeaves = 0;
// Merge the leaves
MergeSplitedLeaves(communicator, sortedParticlesArray, &nbParticlesInArray, &leavesOffsetInParticles, &particlesArrayInLeafOrder, &nbLeaves);
MergeSplitedLeaves(communicator, &sortedParticlesArray, &nbParticlesInArray, &leavesOffsetInParticles, &particlesArrayInLeafOrder, &nbLeaves);
delete[] sortedParticlesArray;
// for(int idx = 0 ; idx < nbParticlesInArray ; ++idx){
......
@@ -107,7 +107,7 @@ public:
* This class is used to gather the usual methods related to identifying an
* MPI communicator.
*/
class FComm : public FNoCopyable {
class FComm {
int rank; ///< rank related to the comm
int nbProc; ///< nb proc in this group
@@ -130,6 +130,26 @@ public:
reset();
}
/// Constructor : duplicates the given communicator
FComm(const FComm& inCommunicator ) {
FMpi::Assert( MPI_Comm_dup(inCommunicator.communicator, &communicator), __LINE__ , "comm dup");
FMpi::Assert( MPI_Comm_group(communicator, &group), __LINE__ , "comm group");
reset();
}
FComm& operator=(const FComm& inCommunicator ) {
if(this == &inCommunicator){
return *this;
}
FMpi::Assert( MPI_Comm_free(&communicator), __LINE__ );
FMpi::Assert( MPI_Group_free(&group), __LINE__ );
FMpi::Assert( MPI_Comm_dup(inCommunicator.communicator, &communicator), __LINE__ , "comm dup");
FMpi::Assert( MPI_Comm_group(communicator, &group), __LINE__ , "comm group");
reset();
return *this;
}
/// Frees communicator and group
virtual ~FComm(){
FMpi::Assert( MPI_Comm_free(&communicator), __LINE__ );
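Since FComm no longer inherits from FNoCopyable, it can be copied; each copy duplicates the underlying MPI communicator and frees it in its own destructor. A short sketch of the new copy semantics (assuming the usual FMpi setup and its global() accessor used elsewhere in ScalFmm):

#include "Utils/FMpi.hpp"
int main(int argc, char* argv[]){
    FMpi app(argc, argv);                  // initializes MPI
    FMpi::FComm workingComm(app.global()); // copy constructor: MPI_Comm_dup + MPI_Comm_group
    FMpi::FComm otherComm = workingComm;   // another independent duplicate
    otherComm = app.global();              // operator= frees the previous handle, then duplicates again
    return 0;                              // each FComm releases its own communicator and group
}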
@@ -248,6 +268,35 @@ public:
delete[] procsIdArray ;
}
/** Change the group, create one group where processInGroup[i] != 0
* and another where processInGroup[i] == 0