Commit 8067ae8b authored by COULAUD Olivier's avatar COULAUD Olivier

Remove some errors and warnings with gcc 8

parent 98cc61e5
......@@ -1913,8 +1913,8 @@ class basic_json
@since version 1.0.0
*/
basic_json(const value_t value_type)
: m_type(value_type), m_value(value_type)
basic_json(const value_t value_type1)
: m_type(value_type1), m_value(value_type1)
{
assert_invariant();
}
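The motivation for the parameter rename is not spelled out beyond the commit message; one plausible reading is that the old name `value_type` collided with the `value_type` member alias that `basic_json` also defines, which newer gcc versions can flag as shadowing. A minimal, self-contained sketch of that kind of clash, using a hypothetical `Container` type rather than the vendored json code:

```cpp
// Hypothetical Container type, not the vendored basic_json: it only illustrates
// a constructor parameter that would otherwise reuse the name of the member
// type alias value_type, which some compilers / warning levels flag as shadowing.
struct Container {
    using value_type = int;   // member type alias, as basic_json also defines one

    // Parameter renamed (value_type -> value_type1), mirroring the patch above.
    explicit Container(value_type value_type1) : m_value(value_type1) {}

    value_type m_value;
};

int main() { return Container(42).m_value == 42 ? 0 : 1; }
```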
......
......@@ -28,15 +28,14 @@ The following are optional:
- Custom BLAS, FFT implementations.
- [StarPU](http://starpu.gforge.inria.fr/) for the relevant FMM implementations.
### To get and Build ScalFMM
To obtain ScalFMM (develop branch) and its git submodules do
### Get and Build ScalFMM
To use the latest development state of ScalFMM, please clone the develop
branch. Note that ScalFMM contains a git submodule `morse_cmake`.
To get the sources, please use these commands:
``` bash
git clone --recursive git@gitlab.inria.fr:solverstack/ScalFMM.git -b develop
```
or
```bash
git clone git@gitlab.inria.fr:solverstack/ScalFMM.git
cd ScalFMM
......@@ -55,9 +54,9 @@ The build may be configured after the first CMake invocation using, for instance
```bash
# Still in the Build folder
ccmake .
ccmake ../
# Or
cmake-gui .
cmake-gui ../
```
The binaries are then compiled by calling `make`. They can be found in `scalfmm/Build/Tests/{Release,Debug}/...`
......
......@@ -3,6 +3,7 @@
#define FBUFFERREADER_HPP
#include <memory>
#include <algorithm>
#include "FAbstractBuffer.hpp"
#include "FBufferWriter.hpp"
#include "Utils/FAssert.hpp"
......@@ -213,6 +214,7 @@ public :
template <class T>
void fillArray(T* const inArray, const FSize count){
FAssertLF(currentIndex + FSize(sizeof(T))*count <= arrayCapacity );
// std::copy(&(array[currentIndex]),&(array[currentIndex])+count, inArray);
memcpy(inArray, &array[currentIndex], sizeof(T)*count);
currentIndex += sizeof(T)*count;
}
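For readers unfamiliar with the buffer classes, here is a minimal, self-contained sketch of the pattern `fillArray()` implements (it is not the real `FBufferReader`, whose full interface is not shown in this hunk): copy `count` elements of type `T` out of a raw byte buffer with `memcpy` and advance the read cursor.

```cpp
#include <cstdio>
#include <cstring>
#include <vector>

// Stand-in for FBufferReader: not the real class, just the fillArray() pattern.
struct SimpleReader {
    std::vector<unsigned char> bytes;  // raw serialized data
    std::size_t currentIndex = 0;      // read cursor

    template <class T>
    void fillArray(T* out, std::size_t count) {
        // copy count elements of type T and advance the cursor
        std::memcpy(out, bytes.data() + currentIndex, sizeof(T) * count);
        currentIndex += sizeof(T) * count;
    }
};

int main() {
    SimpleReader reader;
    const int src[3] = {7, 8, 9};
    reader.bytes.resize(sizeof(src));
    std::memcpy(reader.bytes.data(), src, sizeof(src));

    int dst[3] = {};
    reader.fillArray(dst, 3);
    std::printf("%d %d %d\n", dst[0], dst[1], dst[2]);
    return 0;
}
```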
......
......@@ -4,36 +4,39 @@
#define SCALFMM_DISTRIBUTED_ALGORITHM
#include <omp.h>
#include <algorithm>
#include <vector>
#include <memory>
//#include <sys/time.h>
//
#ifdef _OPENMP
#include <omp.h>
#endif
//
#include "Utils/FGlobal.hpp"
#include "Utils/FAssert.hpp"
#include "Utils/FLog.hpp"
#include "Utils/FTic.hpp"
#include "Utils/FAlgorithmTimers.hpp"
#include "Utils/FGlobal.hpp"
#include "../Containers/FBoolArray.hpp"
#include "../Containers/FOctree.hpp"
#include "../Containers/FLightOctree.hpp"
#include "Utils/FEnv.hpp"
#include "Utils/FMpi.hpp"
#include "../Containers/FBufferWriter.hpp"
#include "../Containers/FBufferReader.hpp"
#include "../Containers/FVector.hpp"
#include "Containers/FVector.hpp"
#include "Containers/FBoolArray.hpp"
#include "Containers/FOctree.hpp"
#include "Containers/FLightOctree.hpp"
#include "Utils/FMpi.hpp"
#include <sys/time.h>
#include "Containers/FBufferWriter.hpp"
#include "Containers/FBufferReader.hpp"
#include "FCoreCommon.hpp"
#include "FP2PExclusion.hpp"
#include <memory>
#include <vector>
#include "Utils/FAlgorithmTimers.hpp"
/**
* @author Berenger Bramas (berenger.bramas@inria.fr)
......@@ -66,21 +69,21 @@ private:
OctreeClass* const tree; ///< The octree to work on
KernelClass** kernels; ///< The kernels
const FMpi::FComm comm; ///< MPI comm
FMpi::FComm fcomCompute;
const int OctreeHeight; ///< Tree height
/// Used to store pointers to cells/leafs to work with
typename OctreeClass::Iterator* iterArray;
/// Used to store pointers to cells/leafs to send/rcv
typename OctreeClass::Iterator* iterArrayComm;
typename OctreeClass::Iterator* iterArray; ///< Used to store pointers to cells/leafs to work with
typename OctreeClass::Iterator* iterArrayComm; ///< Used to store pointers to cells/leafs to send/rcv
const FMpi::FComm comm; ///< MPI communicator
FMpi::FComm fcomCompute;
int numberOfLeafs; ///< To store the size at the previous level
const int MaxThreads; ///< Maximum number of threads allowed by OpenMP
const int nbProcessOrig; ///< Process count
const int idProcessOrig; ///< Current process id
int nbProcess; ///< Process count
int idProcess; ///< Current process id
const int OctreeHeight; ///< Tree height
int nbProcess; ///< Process count
int idProcess; ///< Current process id
const int nbProcessOrig; ///< Process count
const int idProcessOrig; ///< Current process id
const int userChunkSize;
const int leafLevelSeparationCriteria;
......@@ -106,8 +109,6 @@ private:
const Interval& getWorkingInterval( int level, int proc) const {
return workingIntervalsPerLevel[OctreeHeight * proc + level];
}
/// Does \a idxProc have work at the given \a idxLevel
/** i.e. does it hold cells and is it responsible for them? */
bool procHasWorkAtLevel(const int idxLevel , const int idxProc) const {
......@@ -125,21 +126,23 @@ private:
}
public:
/// Get an interval from a process id and tree level
// Interval& getWorkingInterval( int level, int proc){
// return workingIntervalsPerLevel[OctreeHeight * proc + level];
// }
// /// Get an interval from a process id and tree level
// const Interval& getWorkingInterval( int level, int proc) const {
// return workingIntervalsPerLevel[OctreeHeight * proc + level];
// }
/// Get current process interval at given \a level
///
/// \brief getWorkingInterval
/// \param level level in the tree
/// \return the interval of the current process at the given tree level
///
Interval& getWorkingInterval( int level){
return getWorkingInterval(level, idProcess);
}
/// Build and dill vector of the MortonIndex Distribution at Leaf level
/// p = mpi process id then
/// [mortonLeafDistribution[2*p], mortonLeafDistribution[2*p+1] is the morton index shared by process p
/// Get the Morton index distribution at the leaf level
///
/// Fills the vector mortonLeafDistribution.
///
/// If p is the MPI process id, then
/// processor p owns the Morton indexes in [mortonLeafDistribution[2*p], mortonLeafDistribution[2*p+1]].
///
/// \param[out] mortonLeafDistribution the distribution vector to fill
///
void getMortonLeafDistribution(std::vector<MortonIndex> & mortonLeafDistribution) final {
mortonLeafDistribution.resize(2*nbProcess) ;
auto level = OctreeHeight - 1;
......@@ -148,14 +151,48 @@ public:
mortonLeafDistribution[2*p] = inter.leftIndex;
mortonLeafDistribution[2*p+1] = inter.rightIndex;
}
}
// /// Build and dill vector of the MortonIndex Distribution at Leaf level
// /// p = mpi process id then
// /// [mortonLeafDistribution[2*p], mortonLeafDistribution[2*p+1] is the morton index shared by process p
// void getMortonLeafDistribution(std::vector<MortonIndex> & mortonLeafDistribution) final {
// mortonLeafDistribution.resize(2*nbProcess) ;
// auto level = OctreeHeight - 1;
// for (int p=0 ; p< nbProcess ; ++p ){
// auto inter = this->getWorkingInterval(level, p );
// mortonLeafDistribution[2*p] = inter.leftIndex;
// mortonLeafDistribution[2*p+1] = inter.rightIndex;
// }
// }
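The flat layout of the distribution vector can be easy to misread, so here is a small, self-contained sketch (with made-up Morton index values) of how a caller would interpret the vector filled by `getMortonLeafDistribution()`: entry `2*p` is the first leaf Morton index owned by process p and entry `2*p+1` the last.

```cpp
#include <cstdio>
#include <vector>

using MortonIndex = long long;  // stand-in for ScalFMM's MortonIndex

int main() {
    const int nbProcess = 3;
    // Flat layout produced by getMortonLeafDistribution():
    // [left_0, right_0, left_1, right_1, left_2, right_2] -- values made up.
    std::vector<MortonIndex> mortonLeafDistribution = {0, 99, 100, 219, 220, 511};

    for (int p = 0; p < nbProcess; ++p) {
        std::printf("process %d owns leaf Morton indexes [%lld, %lld]\n",
                    p, mortonLeafDistribution[2*p], mortonLeafDistribution[2*p+1]);
    }
    return 0;
}
```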
///
/// \brief setKernel
/// \param inKernels a pointer to the computational kernel used in the algorithm
///
/// @todo move it to the private section
void setKernel(KernelClass*const inKernels){
this->kernels = new KernelClass*[MaxThreads]{};
#pragma omp parallel num_threads(MaxThreads)
{
#pragma omp critical (InitFFmmAlgorithmThreadProcPeriodic)
{
this->kernels[omp_get_thread_num()] = new KernelClass(*inKernels);
}
}
}
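The per-thread duplication done by `setKernel()` is a common OpenMP pattern: one private copy of the user kernel per thread, with the copy constructions serialized in a critical section in case `KernelClass`'s copy constructor is not thread-safe. A self-contained sketch of the same pattern, using a stand-in `DummyKernel` type rather than a real ScalFMM kernel:

```cpp
#include <omp.h>
#include <vector>

// Stand-in for KernelClass: any copy-constructible type works here.
struct DummyKernel { int data = 0; };

int main() {
    const int maxThreads = omp_get_max_threads();
    std::vector<DummyKernel*> kernels(maxThreads, nullptr);
    DummyKernel userKernel;

    #pragma omp parallel num_threads(maxThreads)
    {
        // the critical section serializes the copy constructions,
        // as in setKernel() above
        #pragma omp critical
        {
            kernels[omp_get_thread_num()] = new DummyKernel(userKernel);
        }
    }

    for (auto* k : kernels) delete k;
    return 0;
}
```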
///
/// \brief getPtrOnMortonIndexAtLeaf
/// \return a pointer to the leftIndex of the first working interval
///
const MortonIndex * getPtrOnMortonIndexAtLeaf() {
return &workingIntervalsPerLevel[0].leftIndex ;
}
/// Does the current process has some work at this level ?
bool hasWorkAtLevel( int level){
///
/// \brief hasWorkAtLevel - Does the current process have some work at this level?
/// \param level the tree level to check
/// \return true if the current process has some work at this level
bool hasWorkAtLevel( int level){
return idProcess == 0 || (getWorkingInterval(level, idProcess - 1).rightIndex) < (getWorkingInterval(level, idProcess).rightIndex);
}
......@@ -171,17 +208,17 @@ public:
const int inLeafLevelSeperationCriteria = 1) :
tree(inTree),
kernels(nullptr),
comm(inComm),
fcomCompute(inComm),
OctreeHeight(tree->getHeight()),
iterArray(nullptr),
iterArrayComm(nullptr),
comm(inComm),
fcomCompute(inComm),
numberOfLeafs(0),
MaxThreads(FEnv::GetValue("SCALFMM_ALGO_NUM_THREADS",omp_get_max_threads())),
nbProcessOrig(inComm.processCount()),
idProcessOrig(inComm.processId()),
nbProcess(0),
idProcess(0),
OctreeHeight(tree->getHeight()),
nbProcessOrig(inComm.processCount()),
idProcessOrig(inComm.processId()),
userChunkSize(inUserChunkSize),
leafLevelSeparationCriteria(inLeafLevelSeperationCriteria),
intervals(new Interval[inComm.processCount()]),
......@@ -189,6 +226,7 @@ public:
FAssertLF(tree, "tree cannot be null");
FAssertLF(leafLevelSeparationCriteria < 3, "Separation criteria should be < 3");
// this->setKernel(inKernels) ;
this->kernels = new KernelClass*[MaxThreads];
#pragma omp parallel num_threads(MaxThreads)
{
......@@ -1703,7 +1741,7 @@ protected:
}
}
// Wait the come to finish (and the previous computation also)
// Wait for the communications to finish (and for the previous computation as well)
#pragma omp barrier
......
......@@ -9,3 +9,4 @@ constexpr const char* FAlgorithmTimers::P2PTimer;
constexpr const char* FAlgorithmTimers::M2PTimer;
constexpr const char* FAlgorithmTimers::P2LTimer;
constexpr const char* FAlgorithmTimers::NearTimer;
constexpr const char* FAlgorithmTimers::FarTimer;
......@@ -27,7 +27,8 @@ public:
static constexpr const char* M2PTimer = "M2P";
static constexpr const char* P2LTimer = "P2L";
static constexpr const char* NearTimer = "Near";
enum {nbTimers = 9};
static constexpr const char* FarTimer = "Far";
enum {nbTimers = 10};
/// Timers
FTimerMap Timers;
......@@ -45,7 +46,7 @@ public:
double res = 0;
try {
res = Timers.at(TimerName).elapsed();
} catch(std::out_of_range) {
} catch(std::out_of_range&) {
res = 0;
}
return res;
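The only change in this hunk is catching `std::out_of_range` by reference instead of by value, which is presumably what gcc 8 complained about (it added a `-Wcatch-value` warning, enabled by `-Wall`, for exception objects caught by value). A standalone sketch of the same pattern with `std::map::at()`:

```cpp
// Standalone sketch: std::map::at() throws std::out_of_range for a missing key;
// catching the exception by reference (as in the patch) avoids gcc 8's
// -Wcatch-value warning and avoids slicing the exception object.
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

int main() {
    std::map<std::string, double> timers{{"Near", 1.5}};
    double res = 0;
    try {
        res = timers.at("Far");      // key absent -> throws std::out_of_range
    } catch (std::out_of_range&) {   // catch by reference, as in the fixed code
        res = 0;
    }
    std::printf("res = %f\n", res);
    return 0;
}
```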
......
......@@ -15,7 +15,7 @@
#ifndef FCOMPLEXE_HPP
#define FCOMPLEXE_HPP
#include <array>
#include "FMath.hpp"
/**
......@@ -32,7 +32,8 @@
*/
template <class FReal>
class FComplex {
FReal complex[2]; //< Real & Imaginary
//std::array<FReal,2> complex; //< Real & Imaginary
FReal complex[2]; //< Real & Imaginary
public:
/** Default constructor (sets the real and imaginary parts to 0) */
......