Commit 37209d54 authored by Martin Khannouz, committed by Berenger Bramas

Change how particles are generated in the implicit test.

That way there is no need for a file to exchange them. Split the MPI job in
two. Slightly change the information printed in the job.
parent b21c42e2
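In short: instead of having rank 0 gather all particles and write them to canard.fma for the implicit test to re-read, every process now regenerates the complete particle set itself with deterministic per-segment loaders, so all ranks hold identical data. A minimal sketch of that scheme, condensed from the random-loader branch of the diff below (NbParticles, LoaderClass, FPoint and mpiComm are assumed to be set up as in the tests; this is an illustration, not a drop-in excerpt):

    // Each rank runs the same deterministic loop, so no file exchange is needed.
    const int nproc = mpiComm.global().processCount();
    FPoint<FReal>* allParticlesToSort = new FPoint<FReal>[NbParticles * nproc];
    for(int i = 0 ; i < nproc ; ++i){
        // The last constructor argument (used as a seed in these tests) changes
        // per segment, so each block of NbParticles particles is distinct.
        LoaderClass loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), i);
        FAssertLF(loader.isOpen());
        for(FSize idxPart = 0 ; idxPart < NbParticles ; ++idxPart){
            loader.fillParticle(&allParticlesToSort[(NbParticles * i) + idxPart]);
        }
    }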
@@ -41,7 +41,7 @@
#include <memory>
//#define RANDOM_PARTICLES
#define RANDOM_PARTICLES
int main(int argc, char* argv[]){
const FParameterNames LocalOptionBlocSize { {"-bs"}, "The size of the block of the blocked tree"};
......
@@ -65,7 +65,7 @@ using namespace std;
// FFmmAlgorithmTask FFmmAlgorithmThread
typedef FFmmAlgorithm<OctreeClass, CellClass, ContainerClass, KernelClass, LeafClass > FmmClass;
#define LOAD_FILE
//#define LOAD_FILE
#ifndef LOAD_FILE
typedef FRandomLoader<FReal> LoaderClass;
#else
@@ -80,13 +80,9 @@ int main(int argc, char* argv[]){
{"-bs"},
"The size of the block of the blocked tree"
};
const FParameterNames Mapping {
{"-map"} ,
"mapping  \\o/."
};
FHelpDescribeAndExit(argc, argv, "Test the blocked tree by counting the particles.",
FParameterDefinitions::OctreeHeight, FParameterDefinitions::NbParticles,
FParameterDefinitions::OctreeSubHeight, FParameterDefinitions::InputFile, LocalOptionBlocSize, Mapping);
FParameterDefinitions::OctreeSubHeight, FParameterDefinitions::InputFile, LocalOptionBlocSize);
// Get params
const int NbLevels = FParameters::getValue(argc,argv,FParameterDefinitions::OctreeHeight.options, 5);
@@ -95,31 +91,46 @@ int main(int argc, char* argv[]){
#ifndef STARPU_USE_MPI
cout << "Pas de mpi -_-\" " << endl;
#endif
int mpi_rank, nproc;
FMpi mpiComm(argc,argv);
mpi_rank = mpiComm.global().processId();
nproc = mpiComm.global().processCount();
#ifndef LOAD_FILE
const FSize NbParticles = FParameters::getValue(argc,argv,FParameterDefinitions::NbParticles.options, FSize(10000));
LoaderClass loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), 0);
#else
// Load the particles
const char* const filename = FParameters::getStr(argc,argv,FParameterDefinitions::InputFile.options, "../Data/test20k.fma");
LoaderClass loader(filename);
#endif
int mpi_rank, nproc;
FMpi mpiComm(argc,argv);
mpi_rank = mpiComm.global().processId();
nproc = mpiComm.global().processCount();
FAssertLF(loader.isOpen());
const FSize NbParticles = loader.getNumberOfParticles();
#endif
// Usual octree
OctreeClass tree(NbLevels, FParameters::getValue(argc,argv,FParameterDefinitions::OctreeSubHeight.options, 2),
loader.getBoxWidth(), loader.getCenterOfBox());
FTestParticleContainer<FReal> allParticles;
FPoint<FReal> * allParticlesToSort = new FPoint<FReal>[loader.getNumberOfParticles()];
FPoint<FReal> * allParticlesToSort = new FPoint<FReal>[NbParticles*mpiComm.global().processCount()];
//Fill particles
#ifndef LOAD_FILE
for(int i = 0; i < mpiComm.global().processCount(); ++i){
LoaderClass loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), i);
FAssertLF(loader.isOpen());
for(FSize idxPart = 0 ; idxPart < NbParticles ; ++idxPart){
loader.fillParticle(&allParticlesToSort[(NbParticles*i) + idxPart]);//Same with file or not
}
}
LoaderClass loader(NbParticles*mpiComm.global().processCount(), 1.0, FPoint<FReal>(0,0,0));
#else
for(FSize idxPart = 0 ; idxPart < loader.getNumberOfParticles() ; ++idxPart){
loader.fillParticle(&allParticlesToSort[idxPart]);//Same with file or not
}
#endif
// Usual octree
OctreeClass tree(NbLevels, FParameters::getValue(argc,argv,FParameterDefinitions::OctreeSubHeight.options, 2),
loader.getBoxWidth(), loader.getCenterOfBox());
std::vector<MortonIndex> distributedMortonIndex;
vector<vector<int>> sizeForEachGroup;
FTestParticleContainer<FReal> allParticles;
sortParticle(allParticlesToSort, NbLevels, groupSize, sizeForEachGroup, distributedMortonIndex, loader, nproc);
for(FSize idxPart = 0 ; idxPart < loader.getNumberOfParticles() ; ++idxPart){
allParticles.push(allParticlesToSort[idxPart]);
......
@@ -59,7 +59,7 @@ using namespace std;
typedef FStarPUCpuWrapper<typename GroupOctreeClass::CellGroupClass, GroupCellClass, GroupKernelClass, typename GroupOctreeClass::ParticleGroupClass, GroupContainerClass> GroupCpuWrapper;
typedef FGroupTaskStarPUImplicitAlgorithm<GroupOctreeClass, typename GroupOctreeClass::CellGroupClass, GroupKernelClass, typename GroupOctreeClass::ParticleGroupClass, GroupCpuWrapper > GroupAlgorithm;
#define LOAD_FILE
//#define LOAD_FILE
#ifndef LOAD_FILE
typedef FRandomLoader<FReal> LoaderClass;
#else
@@ -86,24 +86,38 @@ int main(int argc, char* argv[]){
#ifndef STARPU_USE_MPI
cout << "Pas de mpi -_-\" " << endl;
#endif
int mpi_rank, nproc;
FMpi mpiComm(argc,argv);
mpi_rank = mpiComm.global().processId();
nproc = mpiComm.global().processCount();
#ifndef LOAD_FILE
const FSize NbParticles = FParameters::getValue(argc,argv,FParameterDefinitions::NbParticles.options, FSize(10000));
LoaderClass loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), 0);
#else
// Load the particles
const char* const filename = FParameters::getStr(argc,argv,FParameterDefinitions::InputFile.options, "../Data/test20k.fma");
LoaderClass loader(filename);
#endif
int mpi_rank, nproc;
FMpi mpiComm(argc,argv);
mpi_rank = mpiComm.global().processId();
nproc = mpiComm.global().processCount();
FAssertLF(loader.isOpen());
const FSize NbParticles = loader.getNumberOfParticles();
#endif
FPoint<FReal> * allParticlesToSort = new FPoint<FReal>[loader.getNumberOfParticles()];
FPoint<FReal> * allParticlesToSort = new FPoint<FReal>[NbParticles*mpiComm.global().processCount()];
//Fill particles
#ifndef LOAD_FILE
for(int i = 0; i < mpiComm.global().processCount(); ++i){
LoaderClass loader(NbParticles, 1.0, FPoint<FReal>(0,0,0), i);
FAssertLF(loader.isOpen());
for(FSize idxPart = 0 ; idxPart < NbParticles ; ++idxPart){
loader.fillParticle(&allParticlesToSort[(NbParticles*i) + idxPart]);//Same with file or not
}
}
LoaderClass loader(NbParticles*mpiComm.global().processCount(), 1.0, FPoint<FReal>(0,0,0));
#else
for(FSize idxPart = 0 ; idxPart < loader.getNumberOfParticles() ; ++idxPart){
loader.fillParticle(&allParticlesToSort[idxPart]);//Same with file or not
}
#endif
std::vector<MortonIndex> distributedMortonIndex;
vector<vector<int>> sizeForEachGroup;
......
@@ -140,48 +140,6 @@ int main(int argc, char* argv[]){
mpiComm.global().getComm()), __LINE__);
}
//Save particles in a file
if(mpiComm.global().processId() == 0){
std::cerr << "Exchange particle to create the file" << std::endl;
std::vector<TestParticle*> particlesGathered;
std::vector<int> sizeGathered;
//Add my own particles
int sizeofParticle = sizeof(TestParticle)*myParticles.getSize();
sizeGathered.push_back(sizeofParticle);
particlesGathered.push_back(new TestParticle[sizeofParticle]);
memcpy(particlesGathered.back(), myParticles.data(), sizeofParticle);
//Retrieve the particles from the other processes
for(int i = 1; i < mpiComm.global().processCount(); ++i)
{
int sizeReceive;
MPI_Recv(&sizeReceive, sizeof(sizeReceive), MPI_BYTE, i, 0, mpiComm.global().getComm(), MPI_STATUS_IGNORE);
sizeGathered.push_back(sizeReceive);
particlesGathered.push_back(new TestParticle[sizeReceive]);
MPI_Recv(particlesGathered.back(), sizeReceive, MPI_BYTE, i, 0, mpiComm.global().getComm(), MPI_STATUS_IGNORE);
}
int sum = 0;
for(int a : sizeGathered)
sum += a/sizeof(TestParticle);
if(sum != totalNbParticles)
std::cerr << "Erreur sum : " << sum << " instead of " << totalNbParticles << std::endl;
//Store in that bloody file
FFmaGenericWriter<FReal> writer("canard.fma");
writer.writeHeader(loader.getCenterOfBox(), loader.getBoxWidth(),totalNbParticles, particles[0]);
for(unsigned int i = 0; i < particlesGathered.size(); ++i)
writer.writeArrayOfParticles(particlesGathered[i], sizeGathered[i]/sizeof(TestParticle));
for(TestParticle* ptr : particlesGathered)
delete ptr;
std::cerr << "Done exchanging !" << std::endl;
}
else{
int sizeofParticle = sizeof(TestParticle)*myParticles.getSize();
MPI_Send(&sizeofParticle, sizeof(sizeofParticle), MPI_BYTE, 0, 0, mpiComm.global().getComm());//Send size
MPI_Send(myParticles.data(), sizeofParticle, MPI_BYTE, 0, 0, mpiComm.global().getComm());
MPI_Send(const_cast<MortonIndex*>(&leftLimite), sizeof(leftLimite), MPI_BYTE, 0, 0, mpiComm.global().getComm());
MPI_Send(const_cast<MortonIndex*>(&myLeftLimite), sizeof(myLeftLimite), MPI_BYTE, 0, 0, mpiComm.global().getComm());
}
// Put the data into the tree
GroupOctreeClass groupedTree(NbLevels, loader.getBoxWidth(), loader.getCenterOfBox(), groupSize,
&allParticles, true, leftLimite);
......
@@ -148,48 +148,6 @@ int main(int argc, char* argv[]){
FLOG(std::cout << "My last index is " << leftLimite << "\n");
FLOG(std::cout << "My left limite is " << myLeftLimite << "\n");
//Save particles in a file
if(mpiComm.global().processId() == 0){
std::cerr << "Exchange particle to create the file" << std::endl;
std::vector<TestParticle*> particlesGathered;
std::vector<int> sizeGathered;
//Add my own particles
int sizeofParticle = sizeof(TestParticle)*myParticles.getSize();
sizeGathered.push_back(sizeofParticle);
particlesGathered.push_back(new TestParticle[sizeofParticle]);
memcpy(particlesGathered.back(), myParticles.data(), sizeofParticle);
//Retrieve the particles from the other processes
for(int i = 1; i < mpiComm.global().processCount(); ++i)
{
int sizeReceive;
MPI_Recv(&sizeReceive, sizeof(sizeReceive), MPI_BYTE, i, 0, mpiComm.global().getComm(), MPI_STATUS_IGNORE);
sizeGathered.push_back(sizeReceive);
particlesGathered.push_back(new TestParticle[sizeReceive]);
MPI_Recv(particlesGathered.back(), sizeReceive, MPI_BYTE, i, 0, mpiComm.global().getComm(), MPI_STATUS_IGNORE);
}
int sum = 0;
for(int a : sizeGathered)
sum += a/sizeof(TestParticle);
if(sum != totalNbParticles)
std::cerr << "Erreur sum : " << sum << " instead of " << totalNbParticles << std::endl;
//Store in that bloody file
FFmaGenericWriter<FReal> writer("canard.fma");
writer.writeHeader(loader.getCenterOfBox(), loader.getBoxWidth(),totalNbParticles, allParticles[0]);
for(unsigned int i = 0; i < particlesGathered.size(); ++i)
writer.writeArrayOfParticles(particlesGathered[i], sizeGathered[i]/sizeof(TestParticle));
for(TestParticle* ptr : particlesGathered)
delete ptr;
std::cerr << "Done exchanging !" << std::endl;
}
else{
int sizeofParticle = sizeof(TestParticle)*myParticles.getSize();
MPI_Send(&sizeofParticle, sizeof(sizeofParticle), MPI_BYTE, 0, 0, mpiComm.global().getComm());//Send size
MPI_Send(myParticles.data(), sizeofParticle, MPI_BYTE, 0, 0, mpiComm.global().getComm());
MPI_Send(const_cast<MortonIndex*>(&leftLimite), sizeof(leftLimite), MPI_BYTE, 0, 0, mpiComm.global().getComm());
MPI_Send(const_cast<MortonIndex*>(&myLeftLimite), sizeof(myLeftLimite), MPI_BYTE, 0, 0, mpiComm.global().getComm());
}
// Put the data into the tree
FP2PParticleContainer<FReal> myParticlesInContainer;
for(FSize idxPart = 0 ; idxPart < myParticles.getSize() ; ++idxPart){
......
#!/bin/sh
export SCALFMM_SIMGRIDOUT='scalfmm.out'
export GROUP_SIZE=500
export GROUP_SIZE=50
export TREE_HEIGHT=5
export NB_NODE=16
export NB_NODE=4
#export NB_PARTICLE_PER_NODE=$(( (`awk "BEGIN{print 8 ** ($TREE_HEIGHT-1)}"` / $NB_NODE) ))
export NB_PARTICLE_PER_NODE=15000
export NB_PARTICLE_PER_NODE=5000
export STARPU_NCPU=1
export STARPU_FXT_PREFIX=`pwd`/
echo "GROUP_SIZE=$GROUP_SIZE"
echo "TREE_HEIGHT=$TREE_HEIGHT"
@@ -12,62 +14,88 @@ echo "NB_NODE=$NB_NODE"
echo "NB_PARTICLE_PER_NODE=$NB_PARTICLE_PER_NODE"
#Compile only what we need
make testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm compareDAGmapping -j $((`nproc`*2))
time make testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm compareDAGmapping -j $((`nproc`*2))
if [ $? -ne 0 ]; then
exit
fi
#Execute explicit mpi version
sleep 10
mpiexec -n $NB_NODE ./Tests/Release/testBlockedMpiAlgorithm -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT 2>/dev/null
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on explicit"
echo
exit
fi
test_kernel()
{
#Execute explicit mpi version
mpiexec -n $NB_NODE ./Tests/Release/testBlockedMpiAlgorithm -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on explicit"
echo
exit
fi
#Aggregate task information from explicit execution
a=`ls $SCALFMM_SIMGRIDOUT\_*`
rm -f $SCALFMM_SIMGRIDOUT
for i in $a; do
cat $i >> $SCALFMM_SIMGRIDOUT
done
#Get task information
cp -f $SCALFMM_SIMGRIDOUT scalfmm_explicit.out
#Generate mapping for implicit version
#mpiexec -n $NB_NODE ./Tests/Release/generateMapping -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT > /dev/null
#Execute implicit version
sleep 10
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitAlgorithm -f canard.fma -bs $GROUP_SIZE -h $TREE_HEIGHT 2>/dev/null
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on implicit"
echo
exit
fi
#Aggregate task information from explicit execution
a=`ls $SCALFMM_SIMGRIDOUT\_*`
rm -f $SCALFMM_SIMGRIDOUT
for i in $a; do
cat $i >> $SCALFMM_SIMGRIDOUT
rm -f $i
done
#Get task information
cp -f scalfmm.out_0 scalfmm_implicit.out
#Get task information
cp -f $SCALFMM_SIMGRIDOUT scalfmm_explicit.out
#Compare DAGs
./Tests/Release/compareDAGmapping -e scalfmm_explicit.out -i scalfmm_implicit.out -h $TREE_HEIGHT > output
#Execute implicit version
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitAlgorithm -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on implicit"
echo
exit
fi
#Get task information
cp -f $SCALFMM_SIMGRIDOUT\_0 scalfmm_implicit.out
rm -f $SCALFMM_SIMGRIDOUT\_*
#Compare DAGs
./Tests/Release/compareDAGmapping -e scalfmm_explicit.out -i scalfmm_implicit.out -h $TREE_HEIGHT > output
}
chebyshev_kernel()
{
mpiexec -n $NB_NODE ./Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on explicit Chebyshev"
echo
exit
fi
##Aggregate task information from explicit execution
a=`ls $SCALFMM_SIMGRIDOUT\_*`
rm -f $SCALFMM_SIMGRIDOUT
for i in $a; do
cat $i >> $SCALFMM_SIMGRIDOUT
rm -f $i
done
#Get task information
cp -f $SCALFMM_SIMGRIDOUT scalfmm_explicit.out
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on implicit Chebyshev"
echo
exit
fi
#Get task information
cp -f $SCALFMM_SIMGRIDOUT\_0 scalfmm_implicit.out
rm -f $SCALFMM_SIMGRIDOUT\_*
#Compare DAGs
./Tests/Release/compareDAGmapping -e scalfmm_explicit.out -i scalfmm_implicit.out -h $TREE_HEIGHT > narval
}
test_kernel
chebyshev_kernel
sleep 10
mpiexec -n $NB_NODE ./Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT 2>/dev/null
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on explicit Chebyshev"
echo
exit
fi
sleep 10
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitChebyshev -f canard.fma -bs $GROUP_SIZE -h $TREE_HEIGHT 2>/dev/null
if [ $? -ne 0 ]; then
echo
echo " /!\\Error on implicit Chebyshev"
echo
exit
fi
#!/usr/bin/env bash
## name of job
#SBATCH -J chebyshev_50M_10_node
#SBATCH -J explicit_chebyshev_50M_10_node
#SBATCH -p longq
## Resources: (nodes, procs, tasks, walltime, ... etc)
#SBATCH -N 10
#SBATCH -c 24
# # standard output message
#SBATCH -o chebyshev_50M_10_node%j.out
#SBATCH -o explicit_chebyshev_50M_10_node%j.out
# # output error message
#SBATCH -e chebyshev_50M_10_node%j.err
#SBATCH -e explicit_chebyshev_50M_10_node%j.err
#SBATCH --mail-type=ALL --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
@@ -18,21 +19,19 @@ spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## modules to load for the job
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=`pwd`/
echo "=====my job informations ===="
echo "Node List: " $SLURM_NODELIST
echo "===== Explicit MPI ===="
echo "my jobID: " $SLURM_JOB_ID
echo "Nb node: " $NB_NODE
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
echo "In the directory: `pwd`"
rm -f canard.fma > /dev/null 2>&1
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average
#TODO probably move trace.rec somewhere else ...
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -f canard.fma -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average
#!/usr/bin/env bash
## name of job
#SBATCH -J implicit_chebyshev_50M_10_node
#SBATCH -p longq
## Resources: (nodes, procs, tasks, walltime, ... etc)
#SBATCH -N 10
#SBATCH -c 24
# # standard output message
#SBATCH -o implicit_chebyshev_50M_10_node%j.out
# # output error message
#SBATCH -e implicit_chebyshev_50M_10_node%j.err
#SBATCH --mail-type=ALL --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=`pwd`/
echo "===== Implicit MPI ===="
echo "my jobID: " $SLURM_JOB_ID
echo "Nb node: " $NB_NODE
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average
@@ -25,12 +25,11 @@ export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=`pwd`/
echo "=====my job informations ===="
echo "Node List: " $SLURM_NODELIST
echo "===== StarPU only ====="
echo "my jobID: " $SLURM_JOB_ID
echo "Nb node: " $NB_NODE
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
echo "In the directory: `pwd`"
rm -f canard.fma > /dev/null 2>&1
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Kernel