solverstack / ScalFMM — Commit 68b80efb

Authored Apr 12, 2016 by Martin Khannouz; committed by Berenger Bramas, Mar 14, 2017

    Change jobs to print more information and change timers.

Parent: 5b4d6981

Showing 9 changed files, with 34 additions and 25 deletions (+34, −25).
Doc/noDist/implicit/implicit.org                    +6  −1
Tests/GroupTree/testBlockedImplicitAlgorithm.cpp    +4  −5
Tests/GroupTree/testBlockedImplicitChebyshev.cpp    +3  −4
Tests/GroupTree/testBlockedMpiAlgorithm.cpp         +5  −6
Tests/GroupTree/testBlockedMpiChebyshev.cpp         +4  −6
compile_dag_result.sh                               +3  −3
jobs/explicit_mpi_chebyshev.sh                      +3  −0
jobs/implicit_chebyshev.sh                          +3  −0
jobs/starpu_chebyshev.sh                            +3  −0
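The timing change is the same across the four C++ test drivers below: the per-process timer is stopped only after a global barrier, so every rank measures the full parallel phase, and a timeAverage helper then collects the per-node times on rank 0 and prints them along with the average. The following is a minimal, self-contained sketch of that pattern, not ScalFMM's actual code: std::chrono and raw MPI stand in here for the project's FTic timer and FMpi communicator wrappers.

#include <mpi.h>
#include <chrono>
#include <iostream>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0, nproc = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    auto start = std::chrono::steady_clock::now();
    // ... run the algorithm (groupalgo.execute() in the real drivers) ...
    MPI_Barrier(MPI_COMM_WORLD);  // stop the clock only once every rank is done
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    double elapsedTime = elapsed.count();

    if (rank == 0) {
        // Rank 0 gathers each rank's time, prints it, and reports the average.
        double sum = elapsedTime;
        std::cout << "Executing time node 0 : " << elapsedTime << "s\n";
        for (int i = 1; i < nproc; ++i) {
            double tmp;
            MPI_Recv(&tmp, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            sum += tmp;
            std::cout << "Executing time node " << i << " : " << tmp << "s\n";
        }
        std::cout << "Average time per node : " << sum / nproc << "s\n";
    } else {
        MPI_Send(&elapsedTime, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}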
Doc/noDist/implicit/implicit.org

@@ -496,6 +496,9 @@ But these data do not necessarily imply data transfers…
- Thoughts on the data-flow graph for Treematch
- Add tests with the Chebyshev kernel and the implicit MPI version
- Symmetrize the implicit algorithm at the P2P level
- Modify the jobs so they use the same seed and generate the same set of particles
- Post-process the traces of a run to produce plots.
- Make use of Samuel's scripts
** What next?

@@ -516,8 +519,10 @@ But these data do not necessarily imply data transfers…
- Do not allocate the symbolic cells if it is not necessary
- Distribute the tree
- Post-process the traces of a run to produce plots.
- Modify the generation of trace.rec
  - To capture the active waiting time of the MPI communications
  - So that it behaves correctly in distributed runs
- Validate the explicit MPI results against the sequential algorithm rather than the non-blocked MPI algorithm
  - Rebuild the global tree
  - Compare only the cells owned by the node
- Modify the jobs so they use the same seed and generate the same set of particles
- State of the art on load balancing for non-uniform distributions
Tests/GroupTree/testBlockedImplicitAlgorithm.cpp

@@ -161,6 +161,7 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(&groupedTree, &groupkernel, distributedMortonIndex);
FTic timerExecute;
groupalgo.execute();
mpiComm.global().barrier();
double elapsedTime = timerExecute.tacAndElapsed();
timeAverage(mpi_rank, nproc, elapsedTime);

@@ -226,16 +227,14 @@ void timeAverage(int mpi_rank, int nproc, double elapsedTime)
if(mpi_rank == 0)
{
    double sumElapsedTime = elapsedTime;
    std::cout << "Executing time node 0 (implicit) : " << sumElapsedTime << "s" << std::endl;
    for(int i = 1; i < nproc; ++i)
    {
        double tmp;
        MPI_Recv(&tmp, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, 0);
        sumElapsedTime += tmp;
        std::cout << "Executing time node " << i << " (implicit) : " << tmp << "s" << std::endl;
        if(tmp > sumElapsedTime)
            sumElapsedTime = tmp;
    }
    sumElapsedTime = sumElapsedTime / (double)nproc;
    std::cout << "Average time per node (implicit) : " << sumElapsedTime << "s" << std::endl;
    std::cout << "Average time per node (implicit Cheby) : " << sumElapsedTime << "s" << std::endl;
}
else
{
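Two remarks on the hunk above. First, it is an inline diff without +/− markers, which is why both a sum-style and a max-style update of sumElapsedTime appear, and why two differently labelled "Average time per node" lines show up side by side: one is the removed line, the other the added one. Second, the literal 0 passed as the final MPI_Status* argument of MPI_Recv only works where MPI_STATUS_IGNORE happens to be a null pointer; MPI_STATUS_IGNORE is the portable spelling. The same rank-0 average could also be computed with a single collective. A sketch of that alternative (the function name timeAverageReduce is hypothetical; MPI_Reduce is the standard MPI call, and this is not what the commit does, just the equivalent collective form):

#include <mpi.h>
#include <iostream>

// Drop-in alternative to the manual receive loop: every rank contributes
// its elapsed time, rank 0 receives the sum and divides by the rank count.
// Assumes MPI is initialised and every rank calls this with its own time.
void timeAverageReduce(int mpi_rank, int nproc, double elapsedTime) {
    double sumElapsedTime = 0.0;
    MPI_Reduce(&elapsedTime, &sumElapsedTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (mpi_rank == 0) {
        std::cout << "Average time per node (implicit) : "
                  << sumElapsedTime / (double)nproc << "s" << std::endl;
    }
}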
Tests/GroupTree/testBlockedImplicitChebyshev.cpp

@@ -140,6 +140,7 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(&groupedTree, &groupkernel, distributedMortonIndex);
FTic timerExecute;
groupalgo.execute(operationsToProceed);
mpiComm.global().barrier();
double elapsedTime = timerExecute.tacAndElapsed();
timeAverage(mpi_rank, nproc, elapsedTime);

@@ -258,15 +259,13 @@ void timeAverage(int mpi_rank, int nproc, double elapsedTime)
if(mpi_rank == 0)
{
    double sumElapsedTime = elapsedTime;
    std::cout << "Executing time node 0 (implicit Cheby) : " << sumElapsedTime << "s" << std::endl;
    for(int i = 1; i < nproc; ++i)
    {
        double tmp;
        MPI_Recv(&tmp, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, 0);
        sumElapsedTime += tmp;
        std::cout << "Executing time node " << i << " (implicit Cheby) : " << tmp << "s" << std::endl;
        if(tmp > sumElapsedTime)
            sumElapsedTime = tmp;
    }
    sumElapsedTime = sumElapsedTime / (double)nproc;
    std::cout << "Average time per node (implicit Cheby) : " << sumElapsedTime << "s" << std::endl;
}
else
Tests/GroupTree/testBlockedMpiAlgorithm.cpp

@@ -149,8 +149,8 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(mpiComm.global(), &groupedTree, &groupkernel);
FTic timerExecute;
groupalgo.execute();
double elapsedTime = timerExecute.tacAndElapsed();
mpiComm.global().barrier();
double elapsedTime = timerExecute.tacAndElapsed();
timeAverage(mpiComm.global().processId(), mpiComm.global().processCount(), elapsedTime);
groupedTree.forEachCellLeaf<GroupContainerClass>([&](GroupCellClass cell, GroupContainerClass* leaf){

@@ -214,19 +214,18 @@ void timeAverage(int mpi_rank, int nproc, double elapsedTime)
if(mpi_rank == 0)
{
    double sumElapsedTime = elapsedTime;
    std::cout << "Executing time node 0 (explicit) : " << sumElapsedTime << "s" << std::endl;
    for(int i = 1; i < nproc; ++i)
    {
        double tmp;
        MPI_Recv(&tmp, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, 0);
        sumElapsedTime += tmp;
        std::cout << "Executing time node " << i << " (explicit) : " << tmp << "s" << std::endl;
        if(tmp > sumElapsedTime)
            sumElapsedTime = tmp;
    }
    sumElapsedTime = sumElapsedTime / (double)nproc;
    std::cout << "Average time per node (explicit) : " << sumElapsedTime << "s" << std::endl;
    std::cout << "Average time per node (implicit Cheby) : " << sumElapsedTime << "s" << std::endl;
}
else
{
    MPI_Send(&elapsedTime, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
}
Tests/GroupTree/testBlockedMpiChebyshev.cpp

@@ -170,7 +170,7 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(mpiComm.global(), &groupedTree, &groupkernel);
timer.tic();
groupalgo.execute();
mpiComm.global().barrier();
timer.tac();
timeAverage(mpiComm.global().processId(), mpiComm.global().processCount(), timer.elapsed());
//std::cout << "Done " << "(@Algorithm = " << timer.elapsed() << "s)." << std::endl;

@@ -293,16 +293,14 @@ void timeAverage(int mpi_rank, int nproc, double elapsedTime)
if(mpi_rank == 0)
{
    double sumElapsedTime = elapsedTime;
    std::cout << "Executing time node 0 (explicit Cheby) : " << sumElapsedTime << "s" << std::endl;
    for(int i = 1; i < nproc; ++i)
    {
        double tmp;
        MPI_Recv(&tmp, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, 0);
        sumElapsedTime += tmp;
        std::cout << "Executing time node " << i << " (explicit Cheby) : " << tmp << "s" << std::endl;
        if(tmp > sumElapsedTime)
            sumElapsedTime = tmp;
    }
    sumElapsedTime = sumElapsedTime / (double)nproc;
    std::cout << "Average time per node (explicit Cheby) : " << sumElapsedTime << "s" << std::endl;
    std::cout << "Average time per node (implicit Cheby) : " << sumElapsedTime << "s" << std::endl;
}
else
{
compile_dag_result.sh

@@ -4,7 +4,7 @@ export GROUP_SIZE=50
export TREE_HEIGHT=5
export NB_NODE=4
#export NB_PARTICLE_PER_NODE=$(( (`awk "BEGIN{print 8 ** ($TREE_HEIGHT-1)}"` / $NB_NODE) ))
export NB_PARTICLE_PER_NODE=5000
export NB_PARTICLE_PER_NODE=50000
export STARPU_NCPU=1
export STARPU_FXT_PREFIX=`pwd`/

@@ -79,7 +79,7 @@ chebyshev_kernel()
#Get task information
cp -f $SCALFMM_SIMGRIDOUT scalfmm_explicit.out
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT
mpiexec -n $NB_NODE ./Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT > pieuvre
if [ $? -ne 0 ]; then
	echo
	echo " /!\\ Error on implicit Chebyshev"

@@ -96,6 +96,6 @@ chebyshev_kernel()
./Tests/Release/compareDAGmapping -e scalfmm_explicit.out -i scalfmm_implicit.out -h $TREE_HEIGHT > narval
}
test_kernel
#test_kernel
chebyshev_kernel
jobs/explicit_mpi_chebyshev.sh

@@ -28,9 +28,12 @@ export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=`pwd`/
echo "===== Explicit MPI ===="
echo "my jobID: " $SLURM_JOB_ID
echo "Model: cube"
echo "Nb node: " $NB_NODE
echo "Nb thread: " $STARPU_NCPU
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Algorithm: explicit"
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average
jobs/implicit_chebyshev.sh

@@ -28,9 +28,12 @@ export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=`pwd`/
echo "===== Implicit MPI ===="
echo "my jobID: " $SLURM_JOB_ID
echo "Model: cube"
echo "Nb node: " $NB_NODE
echo "Nb thread: " $STARPU_NCPU
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Algorithm: implicit"
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average
jobs/starpu_chebyshev.sh

@@ -27,9 +27,12 @@ export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=`pwd`/
echo "===== StarPU only ====="
echo "my jobID: " $SLURM_JOB_ID
echo "Model: cube"
echo "Nb node: " $NB_NODE
echo "Nb thread: " $STARPU_NCPU
echo "Tree height: " $TREE_HEIGHT
echo "Group size: " $GROUP_SIZE
echo "Algorithm: starpu"
echo "Particle per node: " $NB_PARTICLE_PER_NODE
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE))
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Kernel