diff --git a/Doc/noDist/implicit/implicit.org b/Doc/noDist/implicit/implicit.org
index c73cb7ae6107b7ebe07ffd383f9f05f54433ff24..7cddacf15e4142da3a203c18aa924e30249b91ed 100644
--- a/Doc/noDist/implicit/implicit.org
+++ b/Doc/noDist/implicit/implicit.org
@@ -114,6 +114,203 @@ It also create a DAG from which interesting property can be used to prove intere
     TODO how is it currently working ?
     Mpi task posted manualy. 
 
+* Setup
+** Installing
+*** FxT
+Installing FxT, which StarPU needs for tracing and debugging:
+#+begin_src sh
+wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.11.tar.gz
+tar xf fxt-0.2.11.tar.gz
+cd fxt-0.2.11
+./configure --prefix=$FXT_INSTALL_DIR
+make
+make install
+#+end_src
+
+Remember to set /FXT_INSTALL_DIR/.
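+For instance (the install path here is only an example, pick any directory you like):
+#+begin_src sh
+# Hypothetical install prefix for FxT; adapt it to your setup.
+export FXT_INSTALL_DIR=$HOME/fxt/install
+#+end_src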
+
+For more information, check [[http://starpu.gforge.inria.fr/doc/html/OfflinePerformanceTools.html][here]].
+
+*** StarPU
+Then, install StarPU and its dependencies.
+You may have to install the /hwloc/ package with your favorite package manager.
+#+begin_src sh
+svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk StarPU
+cd StarPU
+./autogen.sh
+./configure --prefix=$STARPU_INSTALL_DIR \
+      --disable-cuda \
+      --disable-opencl \
+      --disable-fortran \
+      --with-fxt=$FXT_INSTALL_DIR \
+      --disable-debug \
+      --enable-openmp \
+      --disable-verbose \
+      --disable-gcc-extensions \
+      --disable-starpu-top \
+      --disable-build-doc \
+      --disable-build-examples \
+      --disable-starpufft \
+      --disable-allocation-cache
+make
+make install
+#+end_src
+
+Remember to set /STARPU_INSTALL_DIR/.
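+For instance (illustrative path, adapt it to your setup):
+#+begin_src sh
+# Hypothetical install prefix for StarPU; it is reused by the exports below.
+export STARPU_INSTALL_DIR=$HOME/StarPU/install
+#+end_src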
+
+These environment variables might be useful to set; adapt the paths to your own installation directories.
+#+begin_src sh
+export PKG_CONFIG_PATH=$FXT_INSTALL_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
+export PKG_CONFIG_PATH=$STARPU_INSTALL_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
+export LD_LIBRARY_PATH=$STARPU_INSTALL_DIR/lib:$LD_LIBRARY_PATH
+export PATH=$PATH:$STARPU_INSTALL_DIR/bin
+#+end_src
+
+If you are on Debian or a Debian-like distribution, simpler ways to install StarPU are described [[http://starpu.gforge.inria.fr/doc/html/BuildingAndInstallingStarPU.html][here]].
+
+*** Scalfmm
+Finally, install Scalfmm.
+If you have access to the scalfmm repository, you can clone it this way:
+#+begin_src sh
+git clone git+ssh://mkhannou@scm.gforge.inria.fr/gitroot/scalfmm/scalfmm.git
+cd scalfmm
+git checkout mpi_implicit
+#+end_src
+If you don't have access to this repository, you can get a tarball this way:
+#+begin_src sh
+wget http://scalfmm.gforge.inria.fr/orgmode/implicit/scalfmm.tar.gz
+tar xf scalfmm.tar.gz
+cd scalfmm
+#+end_src
+
+Then:
+#+begin_src
+cd Build
+cmake .. -DSCALFMM_USE_MPI=ON -DSCALFMM_USE_STARPU=ON -DSCALFMM_USE_FFT=ON -DSCALFMM_BUILD_EXAMPLES=ON -DSCALFMM_BUILD_TESTS=ON 
+make  testBlockedChebyshev testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm 
+#+end_src
+
+*** Execute
+Here is a quick way to run it on your computer:
+#+begin_src sh
+cd scalfmm/Build
+export STARPU_FXT_PREFIX=otter
+mpiexec -n 2 ./Tests/Release/testBlockedImplicitChebyshev -nb 50000 -bs 100 -h 5
+#+end_src
+
+If you want to gather the traces for this specific case:
+#+begin_src
+starpu_fxt_tool -i otter*
+#+end_src
+This will create /paje.trace/, /trace.rec/, /task.rec/ and many other trace files.
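+If you have a Pajé-compatible visualizer such as ViTE installed (an assumption, it is not part of this setup), you can then inspect the generated trace directly:
+#+begin_src sh
+# Open the Pajé trace produced by starpu_fxt_tool in ViTE.
+vite paje.trace
+#+end_src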
+
+
+** Useful scripts
+*** Setup on plafrim
+To set up everything that is needed on plafrim, I first install spack.
+#+begin_src sh
+git clone https://github.com/fpruvost/spack.git
+#+end_src
+
+
+
+Then you have to add the spack binary to your path.
+
+#+begin_src sh
+PATH=$PATH:$PWD/spack/bin
+#+end_src
+If your default Python interpreter isn't Python 2, you might have to replace the first line of spack/bin/spack with
+#+begin_src sh
+#!/usr/bin/env python2
+#+end_src
+so that the script is automatically run with Python 2.
+Then, you have to add your SSH key to your SSH agent. The following script kills any running ssh-agent, respawns one and adds the key.
+#+begin_src sh
+SSH_KEY=".ssh/rsa_inria"
+killall -9 ssh-agent > /dev/null
+eval `ssh-agent` > /dev/null
+ssh-add $SSH_KEY
+#+end_src
+
+Because plafrim nodes cannot connect to the rest of the world, you have to copy the data there:
+copy the spack directory, then use spack to create a mirror that is sent to plafrim so that spack can install the packages.
+
+#+begin_src sh
+MIRROR_DIRECTORY="tarball_scalfmm"
+#Copy spack to plafrim
+scp -r spack mkhannou@plafrim:/home/mkhannou
+#Recreate the mirror
+rm -rf $MIRROR_DIRECTORY
+mkdir $MIRROR_DIRECTORY
+spack mirror create -D -d $MIRROR_DIRECTORY starpu@svn-trunk+mpi \^openmpi
+#Create an archive and send it to plafrim
+tar czf /tmp/canard.tar.gz $MIRROR_DIRECTORY 
+scp /tmp/canard.tar.gz mkhannou@plafrim-ext:/home/mkhannou
+rm -f /tmp/canard.tar.gz
+#Install on plafrim
+ssh mkhannou@plafrim 'tar xf canard.tar.gz; rm -f canard.tar.gz'
+ssh mkhannou@plafrim "/home/mkhannou/spack/bin/spack mirror add local_filesystem file:///home/mkhannou/$MIRROR_DIRECTORY"
+ssh mkhannou@plafrim '/home/mkhannou/spack/bin/spack install starpu@svn-trunk+mpi+fxt \^openmpi'
+#+end_src
+
+      TODO add the script I use on the plafrim side with the library links.
+
+*** Execute on plafrim
+To run my tests on plafrim, I used the two following scripts.
+The first one sends the scalfmm repository to plafrim.
+
+#+include: "~/narval.sh" src sh
+
+Note: you might have to add your SSH key again if you killed your previous ssh-agent.
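+
+The exact content of /narval.sh/ is pulled in above via =#+include=; as a rough sketch (user name, host and paths are assumptions tied to my account), it essentially pushes the local scalfmm tree to plafrim:
+#+begin_src sh
+# Hypothetical sketch of narval.sh: copy the scalfmm sources to plafrim,
+# skipping the Build directory. Adapt user, host and paths.
+rsync -e ssh -avz --delete-after --exclude 'Build/*' \
+      ~/scalfmm mkhannou@plafrim:/home/mkhannou/
+#+end_src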
+
+Then, the one that runs on plafrim: it configures, compiles and submits all the jobs.
+
+#+begin_src sh
+module add slurm
+module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
+
+# specific to plafrim to get missing system libs
+export LIBRARY_PATH=/usr/lib64:$LIBRARY_PATH
+
+# load spack env
+export SPACK_ROOT=$HOME/spack
+. $SPACK_ROOT/share/spack/setup-env.sh
+
+spack load fftw
+spack load hwloc
+spack load openmpi
+spack load starpu@svn-trunk+fxt
+cd scalfmm/Build
+rm -rf CMakeCache.txt CMakeFiles > /dev/null
+cmake .. -DSCALFMM_USE_MPI=ON -DSCALFMM_USE_STARPU=ON -DSCALFMM_USE_FFT=ON -DSCALFMM_BUILD_EXAMPLES=ON -DSCALFMM_BUILD_TESTS=ON -DCMAKE_CXX_COMPILER=`which g++` 
+make clean
+make  testBlockedChebyshev testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm 
+
+cd ..
+
+files=./jobs/*.sh
+mkdir jobs_result
+for f in $files
+do
+    echo "Submit $f..."
+    sbatch $f
+    
+    if [ "$?" != "0" ] ; then 
+        break;
+    fi
+done
+#+end_src
+
+*** Export orgmode somewhere accessible
+A good place I found to put your orgmode file and its HTML export is the Inria forge, in your project's web space.
+For me it was the path /home/groups/scalfmm/htdocs.
+So I created a directory named orgmode and wrote the following script to update the files.
+
+#+include: "~/scalfmm/export_orgmode.sh" src sh
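+
+For readers without access to that file: the script batch-exports the org file to HTML with Emacs and syncs the result to the forge, along the lines of the version tracked in this repository (same host and paths):
+#+begin_src sh
+# Sketch of export_orgmode.sh: export implicit.org to HTML and publish it.
+cd /home/mkhannou/scalfmm/Doc/noDist/implicit
+emacs implicit.org --batch -f org-html-export-to-html --kill
+cd ..
+rsync -e ssh -avz --delete-after implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
+ssh scm.gforge.inria.fr "chmod -R og+r /home/groups/scalfmm/htdocs/orgmode/implicit"
+#+end_src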
+
 * Implicit MPI FMM
 ** Sequential Task Flow with implicit communication
    There is very few difference between the STF and implicite MPI STF.
@@ -254,9 +451,11 @@ One node has 2 Dodeca-core Haswell Intel® Xeon® E5-2680, 2,5GHz, 128Go de RAM
 
 #+begin_src src C
 mpiComm.global().barrier();
-timer.tic();
+FTic timer;
+starpu_fxt_start_profiling();
 groupalgo.execute();
 mpiComm.global().barrier();
+starpu_fxt_stop_profiling();
 timer.tac();
 #+end_src
 
@@ -307,158 +506,6 @@ The scripts of the jobs for StarPU with implicit mpi on 10 nodes:
 #+CAPTION:  Parallel efficiency on cube ([[./output/cube-parallel-efficiency.pdf][pdf]]).
 [[./output/cube-parallel-efficiency.png]]
 
-* Notes
-** Installing
-
-Installing Fxt for debbuging StarPU:
-#+begin_src
-wget http://download.savannah.gnu.org/releases/fkt/fxt-0.2.11.tar.gz
-tar xf fxt-0.2.11.tar.gz
-cd fxt
-mkdir install
-./configure --prefix=$PWD/install
-make
-make install
-export PKG_CONFIG_PATH=/home/mkhannou/fxt/install/lib/pkgconfig:$PKG_CONFIG_PATH
-#+end_src
-
-Then, install StarPu and its dependancy.
-#+begin_src
-pacman -S hwloc
-svn checkout svn://scm.gforge.inria.fr/svn/starpu/trunk StarPU
-cd StarPU
-./autogen.sh
-mkdir install
-./configure --prefix=$PWD/install --with-fxt
-make
-make install
-#+end_src
-
-This are envirronement variable that might be useful to set. But of course, be smart and replace the path in STARPU_DIR by your path.
-#+begin_src
-export STARPU_DIR=/home/mkhannou/StarPU/install
-export PKG_CONFIG_PATH=$STARPU_DIR/lib/pkgconfig:$PKG_CONFIG_PATH
-export LD_LIBRARY_PATH=$STARPU_DIR/lib:$LD_LIBRARY_PATH
-export STARPU_GENERATE_TRACE=1
-export PATH=$PATH:$STARPU_DIR/bin
-#+end_src
-
-If you are on Debian or Debian like distribution, simpler way to install StarPU are described [[http://starpu.gforge.inria.fr/doc/html/BuildingAndInstallingStarPU.html][here]].
-
-Finally, install Scalfmm:
-#+begin_src
-git clone git+ssh://mkhannou@scm.gforge.inria.fr/gitroot/scalfmm/scalfmm.git
-cd scalfmm
-git checkout mpi_implicit
-cd Build
-cmake .. -DSCALFMM_USE_MPI=ON -DSCALFMM_USE_STARPU=ON -DSCALFMM_USE_FFT=ON -DSCALFMM_BUILD_EXAMPLES=ON -DSCALFMM_BUILD_TESTS=ON -DCMAKE_CXX_COMPILER=`which g++` 
-make  testBlockedChebyshev testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm 
-#+end_src
-
-** Useful script
-*** Setup on plafrim
-To setup everything that is needed on plafrim I first install spack.
-#+begin_src sh
-git clone https://github.com/fpruvost/spack.git
-#+end_src
-
-
-
-Then you have to add spack binary in your path.
-
-#+begin_src sh
-PATH=$PATH:spack/bin/spack
-#+end_src
-If your default python interpreter isn't python 2, you might have to replace the first line of spack/bin/spack by
-#+begin_src sh
-#!/usr/bin/env python2
-#+end_src
-So the script is automaticly run with python 2.
-Then, you have to add your ssh key to your ssh agent. The following script kill all ssh agent, then respawn it and add the ssh key.
-#+begin_src sh
-SSH_KEY=".ssh/rsa_inria"
-killall -9 ssh-agent > /dev/null
-eval `ssh-agent` > /dev/null
-ssh-add $SSH_KEY
-#+end_src
-
-Because on plafrim, users can't connect to the rest of the world, you have to copy data there.
-So copy spack directory, use spack to create a mirror that will be sent to plafrim so spack will be able to install package.
-
-#+begin_src sh
-MIRROR_DIRECTORY="tarball_scalfmm"
-#Copy spack to plafrim
-scp -r spack mkhannou@plafrim:/home/mkhannou
-#Recreate the mirror
-rm -rf $MIRROR_DIRECTORY
-mkdir $MIRROR_DIRECTORY
-spack mirror create -D -d $MIRROR_DIRECTORY starpu@svn-trunk+mpi \^openmpi
-#Create an archive and send it to plafrim
-tar czf /tmp/canard.tar.gz $MIRROR_DIRECTORY 
-scp /tmp/canard.tar.gz mkhannou@plafrim-ext:/home/mkhannou
-rm -f /tmp/canard.tar.gz
-#Install on plafrim
-ssh mkhannou@plafrim 'tar xf canard.tar.gz; rm -f canard.tar.gz'
-ssh mkhannou@plafrim "/home/mkhannou/spack/bin/spack mirror add local_filesystem file:///home/mkhannou/$MIRROR_DIRECTORY"
-ssh mkhannou@plafrim '/home/mkhannou/spack/bin/spack install starpu@svn-trunk+mpi+fxt \^openmpi'
-#+end_src
-
-      TODO add script I add on plafrim side with library links.
-
-*** Execute on plafrim
-To run my tests on plafrim, I used the two following scripts.
-One to send the scalfmm repository to plafrim.
-
-#+include: "~/narval.sh" src sh
-
-Note : you might have to add your ssh_key again if you killed your previous ssh agent.
-
-Then, the one that is runned on plafrim. It configure, compile and submit all the jobs on plafrim.
-
-#+begin_src sh
-module add slurm
-module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
-
-# specific to plafrim to get missing system libs
-export LIBRARY_PATH=/usr/lib64:$LIBRARY_PATH
-
-# load spack env
-export SPACK_ROOT=$HOME/spack
-. $SPACK_ROOT/share/spack/setup-env.sh
-
-spack load fftw
-spack load hwloc
-spack load openmpi
-spack load starpu@svn-trunk+fxt
-cd scalfmm/Build
-rm -rf CMakeCache.txt CMakeFiles > /dev/null
-cmake .. -DSCALFMM_USE_MPI=ON -DSCALFMM_USE_STARPU=ON -DSCALFMM_USE_FFT=ON -DSCALFMM_BUILD_EXAMPLES=ON -DSCALFMM_BUILD_TESTS=ON -DCMAKE_CXX_COMPILER=`which g++` 
-make clean
-make  testBlockedChebyshev testBlockedImplicitChebyshev testBlockedMpiChebyshev testBlockedImplicitAlgorithm testBlockedMpiAlgorithm 
-
-cd ..
-
-files=./jobs/*.sh
-mkdir jobs_result
-for f in $files
-do
-    echo "Submit $f..."
-    sbatch $f
-    
-    if [ "$?" != "0" ] ; then 
-        break;
-    fi
-done
-#+end_src
-
-*** Export orgmode somewhere accessible
-A good place I found to put your orgmode file and its html part is on the inria forge, in your project repository.
-For me it was the path /home/groups/scalfmm/htdocs.
-So I created a directory named orgmode and create the following script to update the files.
-
-#+include: "~/scalfmm/export_orgmode.sh" src sh
-
-
 * Journal
 ** Implémentation mpi implicite très naïve
 Cette première version avait pour principal but de découvrir et à prendre en main les fonctions de StarPU MPI.
diff --git a/Tests/GroupTree/testBlockedChebyshev.cpp b/Tests/GroupTree/testBlockedChebyshev.cpp
index 9c76bd3710902bbdd0a297021300026429be0d77..f17669bb3794652ece2e1337b159e8ff42d3ee33 100644
--- a/Tests/GroupTree/testBlockedChebyshev.cpp
+++ b/Tests/GroupTree/testBlockedChebyshev.cpp
@@ -121,8 +121,11 @@ int main(int argc, char* argv[]){
     GroupAlgorithm groupalgo(&groupedTree,&groupkernel);
 
     timer.tic();
+	starpu_fxt_start_profiling();
     groupalgo.execute();
-    std::cout << "Average executed in in " << timer.tacAndElapsed() << "s\n";
+	starpu_fxt_stop_profiling();
+	timer.tac();
+    std::cout << "Average executed in " << timer.elapsed() << "s\n";
 
     // Validate the result
     if(FParameters::existParameter(argc, argv, LocalOptionNoValidate.options) == false){
diff --git a/Tests/GroupTree/testBlockedImplicitChebyshev.cpp b/Tests/GroupTree/testBlockedImplicitChebyshev.cpp
index e759106308ad137c1e09788a6f27c15dd665e2b5..135c27689eb9b50522f7a44698ef487ddc1dfa38 100644
--- a/Tests/GroupTree/testBlockedImplicitChebyshev.cpp
+++ b/Tests/GroupTree/testBlockedImplicitChebyshev.cpp
@@ -140,10 +140,12 @@ int main(int argc, char* argv[]){
     GroupAlgorithm groupalgo(&groupedTree,&groupkernel, distributedMortonIndex);
 	mpiComm.global().barrier();
 	FTic timerExecute;
+	starpu_fxt_start_profiling();
 	groupalgo.execute(operationsToProceed);
 	mpiComm.global().barrier();
-	double elapsedTime = timerExecute.tacAndElapsed();
-	timeAverage(mpi_rank, nproc, elapsedTime);
+	starpu_fxt_stop_profiling();
+	timerExecute.tac();
+	timeAverage(mpi_rank, nproc, timerExecute.elapsed());
 	
     // Validate the result
     if(FParameters::existParameter(argc, argv, LocalOptionNoValidate.options) == false){
diff --git a/Tests/GroupTree/testBlockedMpiChebyshev.cpp b/Tests/GroupTree/testBlockedMpiChebyshev.cpp
index 8548d2d9b7bcc904a43f06f0ba54ddb42c70bf80..e13dab62fde96b2d752c7a9567b1a493ca4f94c8 100644
--- a/Tests/GroupTree/testBlockedMpiChebyshev.cpp
+++ b/Tests/GroupTree/testBlockedMpiChebyshev.cpp
@@ -170,8 +170,10 @@ int main(int argc, char* argv[]){
         GroupAlgorithm groupalgo(mpiComm.global(), &groupedTree,&groupkernel);
 		mpiComm.global().barrier();
         timer.tic();
+		starpu_fxt_start_profiling();
         groupalgo.execute();
 		mpiComm.global().barrier();
+		starpu_fxt_stop_profiling();
         timer.tac();
 		timeAverage(mpiComm.global().processId(), mpiComm.global().processCount(), timer.elapsed());
         //std::cout << "Done  " << "(@Algorithm = " << timer.elapsed() << "s)." << std::endl;
diff --git a/Utils/benchmark/loutre.py b/Utils/benchmark/loutre.py
index db21effc04b9a33d5e5b150ca447f297714f9fe5..516043cf9d84898dd5b7385616266dc0b17039ce 100755
--- a/Utils/benchmark/loutre.py
+++ b/Utils/benchmark/loutre.py
@@ -55,7 +55,7 @@ class ScalFMMConfig(object):
         return header
 
 
-    def gen_record(self, global_time, runtime_time, task_time, idle_time, scheduling_time, rmem):
+    def gen_record(self, global_time, runtime_time, task_time, idle_time, scheduling_time, communication_time, rmem):
         columns = [
             self.model,
             self.algorithm,
@@ -69,7 +69,7 @@ class ScalFMMConfig(object):
             task_time,
             idle_time,
             scheduling_time,
-            0.0,
+            communication_time,
             rmem,
         ]
         record = ""
@@ -97,6 +97,7 @@ def get_times_from_trace_file(filename):
     idle_time = 0.0
     runtime_time = 0.0
     scheduling_time = 0.0
+    communication_time = 0.0
     for line in stdout.decode().splitlines():
         arr = line.replace("\"", "").split(",")
         if arr[0] == "Name":
@@ -112,7 +113,7 @@ def get_times_from_trace_file(filename):
             elif arr[2] == "Other":
                 idle_time = float(arr[3])
             # sys.exit("Invalid time!")
-    return runtime_time, task_time, idle_time, scheduling_time
+    return runtime_time, task_time, idle_time, scheduling_time, communication_time
 
 def main():
     output_trace_file=""
@@ -158,7 +159,7 @@ def main():
             if re.search("Average", line):
                 a = re.findall("[-+]?\d*\.\d+|\d+", line)
                 if len(a) == 1:
-                    global_time = a[0]
+                    global_time = float(a[0])*1000 # convert from seconds to milliseconds, to match the trace times
             elif re.search("Total particles", line):
                 a = re.findall("[-+]?\d*\.\d+|\d+", line)
                 if len(a) == 1:
@@ -185,16 +186,22 @@ def main():
                 config.algorithm = line[line.index(":")+1:].strip()
 
     if (os.path.isfile(trace_filename)): #Time in milli
-        runtime_time, task_time, idle_time, scheduling_time = get_times_from_trace_file(trace_filename)
+        runtime_time, task_time, idle_time, scheduling_time, communication_time = get_times_from_trace_file(trace_filename)
     else:
         print("File doesn't exist " + trace_filename)
+    sum_time = (runtime_time + task_time + scheduling_time + communication_time)/(config.num_nodes*config.num_threads)
+    diff_time = float('%.2f'%(abs(global_time-sum_time)/global_time))
+
+    if diff_time > 0.01:   
+        print('\033[31m/!\\Timing Error of ' + str(diff_time) + '\033[39m')
 
     # Write a record to the output file.
-    output_file.write(config.gen_record(float(global_time),
+    output_file.write(config.gen_record(global_time,
                       float(runtime_time),
                       float(task_time),
                       float(idle_time),
                       float(scheduling_time),
+                      float(communication_time),
                       int(rmem)))
 
 main()
diff --git a/export_orgmode.sh b/export_orgmode.sh
index 66c2288898a40d854c9245561efcaca1db72fc64..5609edcabe656632ded62adc221525591d62d72f 100755
--- a/export_orgmode.sh
+++ b/export_orgmode.sh
@@ -3,5 +3,9 @@ cd /home/mkhannou/scalfmm/Doc/noDist/implicit
 emacs implicit.org --batch -f org-html-export-to-html --kill
 ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; rm -rf implicit"
 cd ..
-scp -r implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
+rsync -e ssh -avz --delete-after implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
 ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; chmod og+r implicit -R;"
+
+#Put an up-to-date tarball
+cd /home/mkhannou/scalfmm
+./export_tarbal.sh &
diff --git a/export_tarbal.sh b/export_tarbal.sh
new file mode 100755
index 0000000000000000000000000000000000000000..033cf419a3e73026f238c994e9ce4558ae84ee5c
--- /dev/null
+++ b/export_tarbal.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+mkdir platypus 
+cd platypus
+git clone git+ssh://mkhannou@scm.gforge.inria.fr/gitroot/scalfmm/scalfmm.git >/dev/null 2>&1
+cd scalfmm
+git checkout mpi_implicit >/dev/null 2>&1
+cd ..
+rm -rf scalfmm/.git > /dev/null
+tar czf scalfmm.tar.gz scalfmm
+scp scalfmm.tar.gz scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/implicit >/dev/null
+cd ..
+rm -rf platypus >/dev/null
diff --git a/jobs/explicit_10N_chebyshev.sh b/jobs/explicit_10N_chebyshev.sh
index 4e2cae5877ebe7be439021962cffd02a67aba11a..4625ca7268f4fe4f3243dda2ee1a608c6594b1ce 100644
--- a/jobs/explicit_10N_chebyshev.sh
+++ b/jobs/explicit_10N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=5000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: explicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/explicit_1N_chebyshev.sh b/jobs/explicit_1N_chebyshev.sh
index f9417510d08f7474e173cf4566421ec47cab877d..3cee87eb7fbe3196ab274564f5fec3e3264d1581 100644
--- a/jobs/explicit_1N_chebyshev.sh
+++ b/jobs/explicit_1N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=50000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: explicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/explicit_2N_chebyshev.sh b/jobs/explicit_2N_chebyshev.sh
index ff0743b4f331aabf714369eee4b0e00828b15509..4e49ccc75215733c51edea525b06a835f5b74848 100644
--- a/jobs/explicit_2N_chebyshev.sh
+++ b/jobs/explicit_2N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=25000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: explicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
@@ -52,4 +53,3 @@ cd ..
 
 ##Move the result into a directory where all result goes
 mv $FINAL_DIR jobs_result
-
diff --git a/jobs/explicit_4N_chebyshev.sh b/jobs/explicit_4N_chebyshev.sh
index 2b2cad58bc4f14ecb10ba3e3904d1c2ce011bd99..7557042050a3659c202aef79e31b61a1ff262f09 100644
--- a/jobs/explicit_4N_chebyshev.sh
+++ b/jobs/explicit_4N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=12500000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: explicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/explicit_8N_chebyshev.sh b/jobs/explicit_8N_chebyshev.sh
index 85ef726582f6a962a83c9c778bf2a9a0a944c6ad..d72d89d8f4321e31eeb5f0958d73bb328d687732 100644
--- a/jobs/explicit_8N_chebyshev.sh
+++ b/jobs/explicit_8N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=6250000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: explicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/implicit_10N_chebyshev.sh b/jobs/implicit_10N_chebyshev.sh
index dccf3384c00b1038d08323f88b51004ed066bc52..de8c978b075cd17bb92f174526a0353b455e7ef9 100644
--- a/jobs/implicit_10N_chebyshev.sh
+++ b/jobs/implicit_10N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=5000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: implicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/implicit_1N_chebyshev.sh b/jobs/implicit_1N_chebyshev.sh
index 9cab04a79c652d32ab8b4cbcf311a13516045f87..b3ebefff8c41f683c230b4a172889b9b837adfd7 100644
--- a/jobs/implicit_1N_chebyshev.sh
+++ b/jobs/implicit_1N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=50000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: implicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/implicit_2N_chebyshev.sh b/jobs/implicit_2N_chebyshev.sh
index 12111b971de8a90be14f88629a772a8b6a2eef02..50f53075f50ce1d40ec8714aac19d29cbea88aff 100644
--- a/jobs/implicit_2N_chebyshev.sh
+++ b/jobs/implicit_2N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=25000000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: implicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/implicit_4N_chebyshev.sh b/jobs/implicit_4N_chebyshev.sh
index ee3e14b842f0e896022f3b0ffeff5133f133834e..174645da129616db1bb771bc33b8907a070f721a 100644
--- a/jobs/implicit_4N_chebyshev.sh
+++ b/jobs/implicit_4N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=12500000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: implicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR
diff --git a/jobs/implicit_8N_chebyshev.sh b/jobs/implicit_8N_chebyshev.sh
index 7514b28c10837db2955eaf605b35bab5912cfe0b..a8090b4c15c161f5480d0b038aebbcf9a7455863 100644
--- a/jobs/implicit_8N_chebyshev.sh
+++ b/jobs/implicit_8N_chebyshev.sh
@@ -26,6 +26,7 @@ export STARPU_NCPU=24
 export NB_PARTICLE_PER_NODE=6250000
 export STARPU_FXT_PREFIX=$SLURM_JOB_ID
 export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
+export STARPU_COMM_STATS=1
 $NUMACTL=numactl --interleave=all
 mkdir $FINAL_DIR
 echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
 echo "Algorithm: implicit" >> $FINAL_DIR/stdout
 echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
 echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
-mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
+mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
 
 #Create argument list for starpu_fxt_tool
 cd $FINAL_DIR