Commit c89d2283 authored by berenger-bramas

Put the assert function into the FMpi class.

git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/scalfmm/scalfmm/trunk@266 2616d619-271b-44dc-8df4-d4a8f33a7222
parent 62a62a92
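
In short, the MPI-based classes previously each carried their own static mpiassert helper; this commit removes those copies and routes every call site through a single FMpi::MpiAssert. Below is a minimal, self-contained usage sketch: the FMpi class here is a simplified stand-in reproducing only the MpiAssert member shown in the diff, and the main function with its MPI_Comm_rank call is illustrative, not part of the commit.

    #include <mpi.h>
    #include <cstdio>

    // Simplified stand-in for the FMpi class: only the new static helper is reproduced here.
    class FMpi {
    public:
        /** Abort the whole run if an MPI call did not return MPI_SUCCESS. */
        static void MpiAssert(const int test, const unsigned line, const char* const message = 0){
            if(test != MPI_SUCCESS){
                printf("[ERROR] Test failed at line %u, result is %d", line, test);
                if(message) printf(", message: %s", message);
                printf("\n");
                fflush(stdout);
                MPI_Abort(MPI_COMM_WORLD, int(line));
            }
        }
    };

    int main(int argc, char** argv){
        MPI_Init(&argc, &argv);
        int rank = 0;
        // Call sites now wrap the MPI return code in FMpi::MpiAssert instead of a local mpiassert:
        FMpi::MpiAssert( MPI_Comm_rank(MPI_COMM_WORLD, &rank), __LINE__ );
        MPI_Finalize();
        return 0;
    }
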
@@ -20,16 +20,6 @@ class FOctreeArrangerProc : FAssertable {
MortonIndex max;
};
-/** assert if needed */
-static void mpiassert(const int test, const unsigned line, const char* const message = 0){
-    if(test != MPI_SUCCESS){
-        printf("[ERROR] Test failled at line %d, result is %d", line, test);
-        if(message) printf(", message: %s",message);
-        printf("\n");
-        fflush(stdout);
-        MPI_Abort(MPI_COMM_WORLD, int(line) );
-    }
-}
/** Find the interval that contains mindex */
int getInterval(const MortonIndex mindex, const int size, const Interval intervals[]) const{
@@ -71,7 +61,7 @@ public:
myLastInterval.max = octreeIterator.getCurrentGlobalIndex();
// We get the min/max indexes from each procs
-mpiassert( MPI_Allgather( &myLastInterval, sizeof(Interval), MPI_BYTE, intervals, sizeof(Interval), MPI_BYTE, comm.getComm()), __LINE__ );
+FMpi::MpiAssert( MPI_Allgather( &myLastInterval, sizeof(Interval), MPI_BYTE, intervals, sizeof(Interval), MPI_BYTE, comm.getComm()), __LINE__ );
// increase interval in the empty morton index
intervals[0].min = 0;
@@ -129,7 +119,7 @@ public:
}
// say who send to who
int*const allcounter = new int[comm.processCount()*comm.processCount()];
-mpiassert( MPI_Allgather( counter, comm.processCount(), MPI_INT, allcounter, comm.processCount(), MPI_INT, comm.getComm()), __LINE__ );
+FMpi::MpiAssert( MPI_Allgather( counter, comm.processCount(), MPI_INT, allcounter, comm.processCount(), MPI_INT, comm.getComm()), __LINE__ );
// prepare buffer to receive
long long int sumToRecv = 0;
@@ -145,7 +135,7 @@ public:
// send
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
if(idxProc != comm.processId() && allcounter[idxProc * comm.processCount() + comm.processId()]){
-mpiassert( MPI_Irecv(&toReceive[indexToReceive[idxProc]], allcounter[idxProc * comm.processCount() + comm.processId()] * sizeof(ParticleClass), MPI_BYTE,
+FMpi::MpiAssert( MPI_Irecv(&toReceive[indexToReceive[idxProc]], allcounter[idxProc * comm.processCount() + comm.processId()] * sizeof(ParticleClass), MPI_BYTE,
idxProc, 0, comm.getComm(), &requests[iterRequests++]), __LINE__ );
hasToRecvFrom += 1;
}
@@ -156,7 +146,7 @@ public:
// recv
for(int idxProc = 0 ; idxProc < comm.processCount() ; ++idxProc){
if(idxProc != comm.processId() && toMove[idxProc].getSize()){
-mpiassert( MPI_Isend(toMove[idxProc].data(), toMove[idxProc].getSize() * sizeof(ParticleClass), MPI_BYTE,
+FMpi::MpiAssert( MPI_Isend(toMove[idxProc].data(), toMove[idxProc].getSize() * sizeof(ParticleClass), MPI_BYTE,
idxProc, 0, comm.getComm(), &requests[iterRequests++]), __LINE__ );
}
}
@@ -176,7 +166,7 @@ public:
MPI_Status status;
while( hasToRecvFrom ){
int done = 0;
-mpiassert( MPI_Waitany( iterRequests, requests, &done, &status ), __LINE__ );
+FMpi::MpiAssert( MPI_Waitany( iterRequests, requests, &done, &status ), __LINE__ );
if( done < limitRecvSend ){
const int source = status.MPI_SOURCE;
for(long long int idxPart = indexToReceive[source] ; idxPart < indexToReceive[source+1] ; ++idxPart){
@@ -216,7 +206,7 @@ public:
}
// wait all send
-mpiassert( MPI_Waitall( iterRequests, requests, MPI_STATUSES_IGNORE), __LINE__ );
+FMpi::MpiAssert( MPI_Waitall( iterRequests, requests, MPI_STATUSES_IGNORE), __LINE__ );
delete[] intervals;
delete[] toMove;
......
@@ -62,16 +62,6 @@ class FFmmAlgorithmThreadProc : protected FAssertable {
Interval*const workingIntervalsPerLevel;
-static void mpiassert(const int test, const unsigned line, const char* const message = 0){
-    if(test != MPI_SUCCESS){
-        printf("[ERROR] Test failled at line %d, result is %d", line, test);
-        if(message) printf(", message: %s",message);
-        printf("\n");
-        fflush(stdout);
-        MPI_Abort(MPI_COMM_WORLD, int(line) );
-    }
-}
Interval& getWorkingInterval(const int level, const int proc){
return workingIntervalsPerLevel[OctreeHeight * proc + level];
}
@@ -146,7 +136,7 @@ public:
fassert(iterArray, "iterArray bad alloc", __LINE__, __FILE__);
// We get the min/max indexes from each procs
-mpiassert( MPI_Allgather( &myLastInterval, sizeof(Interval), MPI_BYTE, intervals, sizeof(Interval), MPI_BYTE, MPI_COMM_WORLD), __LINE__ );
+FMpi::MpiAssert( MPI_Allgather( &myLastInterval, sizeof(Interval), MPI_BYTE, intervals, sizeof(Interval), MPI_BYTE, MPI_COMM_WORLD), __LINE__ );
Interval myIntervals[OctreeHeight];
myIntervals[OctreeHeight - 1] = myLastInterval;
@@ -172,7 +162,7 @@ public:
}
// We get the min/max indexes from each procs
-mpiassert( MPI_Allgather( myIntervals, sizeof(Interval) * OctreeHeight, MPI_BYTE,
+FMpi::MpiAssert( MPI_Allgather( myIntervals, sizeof(Interval) * OctreeHeight, MPI_BYTE,
workingIntervalsPerLevel, sizeof(Interval) * OctreeHeight, MPI_BYTE, MPI_COMM_WORLD), __LINE__ );
}
@@ -550,7 +540,7 @@ private:
// what the will send to who
int globalReceiveMap[nbProcess * nbProcess * OctreeHeight];
memset(globalReceiveMap, 0, sizeof(int) * nbProcess * nbProcess * OctreeHeight);
-mpiassert( MPI_Allgather( indexToSend, nbProcess * OctreeHeight, MPI_INT, globalReceiveMap, nbProcess * OctreeHeight, MPI_INT, MPI_COMM_WORLD), __LINE__ );
+FMpi::MpiAssert( MPI_Allgather( indexToSend, nbProcess * OctreeHeight, MPI_INT, globalReceiveMap, nbProcess * OctreeHeight, MPI_INT, MPI_COMM_WORLD), __LINE__ );
FDEBUG(gatherCounter.tac());
@@ -588,7 +578,7 @@ private:
toSend[idxLevel * nbProcess + idxProc][idxLeaf].getCurrentCell()->serializeUp(sendBuffer[idxLevel * nbProcess + idxProc][idxLeaf].data);
}
-mpiassert( MPI_Isend( sendBuffer[idxLevel * nbProcess + idxProc], toSendAtProcAtLevel * sizeof(CellToSend) , MPI_BYTE ,
+FMpi::MpiAssert( MPI_Isend( sendBuffer[idxLevel * nbProcess + idxProc], toSendAtProcAtLevel * sizeof(CellToSend) , MPI_BYTE ,
idxProc, FMpi::TagLast + idxLevel, MPI_COMM_WORLD, &requests[iterRequest++]) , __LINE__ );
}
@@ -596,7 +586,7 @@ private:
if(toReceiveFromProcAtLevel){
recvBuffer[idxLevel * nbProcess + idxProc] = new CellToSend[toReceiveFromProcAtLevel];
-mpiassert( MPI_Irecv(recvBuffer[idxLevel * nbProcess + idxProc], toReceiveFromProcAtLevel * sizeof(CellToSend), MPI_BYTE,
+FMpi::MpiAssert( MPI_Irecv(recvBuffer[idxLevel * nbProcess + idxProc], toReceiveFromProcAtLevel * sizeof(CellToSend), MPI_BYTE,
idxProc, FMpi::TagLast + idxLevel, MPI_COMM_WORLD, &requests[iterRequest++]) , __LINE__ );
}
}
@@ -994,7 +984,7 @@ private:
}
FDEBUG(gatherCounter.tic());
-mpiassert( MPI_Allgather( partsToSend, nbProcess, MPI_INT, globalReceiveMap, nbProcess, MPI_INT, MPI_COMM_WORLD), __LINE__ );
+FMpi::MpiAssert( MPI_Allgather( partsToSend, nbProcess, MPI_INT, globalReceiveMap, nbProcess, MPI_INT, MPI_COMM_WORLD), __LINE__ );
FDEBUG(gatherCounter.tac());
@@ -1003,7 +993,7 @@ private:
if(globalReceiveMap[idxProc * nbProcess + idProcess]){
recvBuffer[idxProc] = reinterpret_cast<ParticleClass*>(new char[sizeof(ParticleClass) * globalReceiveMap[idxProc * nbProcess + idProcess]]);
-mpiassert( MPI_Irecv(recvBuffer[idxProc], globalReceiveMap[idxProc * nbProcess + idProcess]*sizeof(ParticleClass), MPI_BYTE,
+FMpi::MpiAssert( MPI_Irecv(recvBuffer[idxProc], globalReceiveMap[idxProc * nbProcess + idProcess]*sizeof(ParticleClass), MPI_BYTE,
idxProc, FMpi::TagFmmP2P, MPI_COMM_WORLD, &requests[iterRequest++]) , __LINE__ );
}
}
@@ -1020,7 +1010,7 @@ private:
currentIndex += toSend[idxProc][idxLeaf].getCurrentListSrc()->getSize();
}
-mpiassert( MPI_Isend( sendBuffer[idxProc], sizeof(ParticleClass) * partsToSend[idxProc] , MPI_BYTE ,
+FMpi::MpiAssert( MPI_Isend( sendBuffer[idxProc], sizeof(ParticleClass) * partsToSend[idxProc] , MPI_BYTE ,
idxProc, FMpi::TagFmmP2P, MPI_COMM_WORLD, &requests[iterRequest++]) , __LINE__ );
}
......
@@ -283,6 +283,16 @@ public:
return int(double(position)/step);
}
+/** assert if mpi error */
+static void MpiAssert(const int test, const unsigned line, const char* const message = 0){
+    if(test != MPI_SUCCESS){
+        printf("[ERROR] Test failled at line %d, result is %d", line, test);
+        if(message) printf(", message: %s",message);
+        printf("\n");
+        fflush(stdout);
+        MPI_Abort(MPI_COMM_WORLD, int(line) );
+    }
+}
private:
/** The original communicator */
......