Commit 2cc828aa authored by Martin Khannouz, committed by Berenger Bramas

A few changes in the jobs to add the volume of communication to stdout.
Add scripts to export a tarball. Add a check on the timings in the Python
script. Small changes to the timers in the tests (nothing major).
parent 34c0a84a
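The job scripts below export STARPU_COMM_STATS=1, which asks StarPU-MPI to print communication statistics (the volume of data exchanged between nodes) at the end of the run, and they no longer pipe the test output through "grep Average", so the whole output, including those statistics, lands in the job's stdout file. The post-processing script then recovers the wall-clock time from the "Average executed in ..." line. A minimal sketch of that extraction, assuming a stdout file in the same format; the helper name is illustrative, while the regex and the seconds-to-milliseconds conversion mirror the script further down:

import re

def read_global_time_ms(stdout_path):
    """Return the wall-clock time in milliseconds from a job stdout file.

    The test prints "Average executed in <seconds>s"; the value is converted
    to milliseconds so it can be compared with the trace timings.
    """
    with open(stdout_path) as stdout_file:
        for line in stdout_file:
            if re.search("Average", line):
                numbers = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(numbers) == 1:
                    return float(numbers[0]) * 1000  # seconds -> milliseconds
    return None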
......@@ -121,8 +121,11 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(&groupedTree,&groupkernel);
timer.tic();
starpu_fxt_start_profiling();
groupalgo.execute();
std::cout << "Average executed in in " << timer.tacAndElapsed() << "s\n";
starpu_fxt_stop_profiling();
timer.tac();
std::cout << "Average executed in in " << timer.elapsed() << "s\n";
// Validate the result
if(FParameters::existParameter(argc, argv, LocalOptionNoValidate.options) == false){
......
......@@ -140,10 +140,12 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(&groupedTree,&groupkernel, distributedMortonIndex);
mpiComm.global().barrier();
FTic timerExecute;
starpu_fxt_start_profiling();
groupalgo.execute(operationsToProceed);
mpiComm.global().barrier();
double elapsedTime = timerExecute.tacAndElapsed();
timeAverage(mpi_rank, nproc, elapsedTime);
starpu_fxt_stop_profiling();
timerExecute.tac();
timeAverage(mpi_rank, nproc, timerExecute.elapsed());
// Validate the result
if(FParameters::existParameter(argc, argv, LocalOptionNoValidate.options) == false){
......
......@@ -170,8 +170,10 @@ int main(int argc, char* argv[]){
GroupAlgorithm groupalgo(mpiComm.global(), &groupedTree,&groupkernel);
mpiComm.global().barrier();
timer.tic();
starpu_fxt_start_profiling();
groupalgo.execute();
mpiComm.global().barrier();
starpu_fxt_stop_profiling();
timer.tac();
timeAverage(mpiComm.global().processId(), mpiComm.global().processCount(), timer.elapsed());
//std::cout << "Done " << "(@Algorithm = " << timer.elapsed() << "s)." << std::endl;
......
......@@ -55,7 +55,7 @@ class ScalFMMConfig(object):
return header
def gen_record(self, global_time, runtime_time, task_time, idle_time, scheduling_time, rmem):
def gen_record(self, global_time, runtime_time, task_time, idle_time, scheduling_time, communication_time, rmem):
columns = [
self.model,
self.algorithm,
......@@ -69,7 +69,7 @@ class ScalFMMConfig(object):
task_time,
idle_time,
scheduling_time,
0.0,
communication_time,
rmem,
]
record = ""
......@@ -97,6 +97,7 @@ def get_times_from_trace_file(filename):
idle_time = 0.0
runtime_time = 0.0
scheduling_time = 0.0
communication_time = 0.0
for line in stdout.decode().splitlines():
arr = line.replace("\"", "").split(",")
if arr[0] == "Name":
......@@ -112,7 +113,7 @@ def get_times_from_trace_file(filename):
elif arr[2] == "Other":
idle_time = float(arr[3])
# sys.exit("Invalid time!")
return runtime_time, task_time, idle_time, scheduling_time
return runtime_time, task_time, idle_time, scheduling_time, communication_time
def main():
output_trace_file=""
......@@ -158,7 +159,7 @@ def main():
if re.search("Average", line):
a = re.findall("[-+]?\d*\.\d+|\d+", line)
if len(a) == 1:
global_time = a[0]
global_time = float(a[0])*1000 # Convert from seconds to milliseconds (trace times are in ms)
elif re.search("Total particles", line):
a = re.findall("[-+]?\d*\.\d+|\d+", line)
if len(a) == 1:
......@@ -185,16 +186,22 @@ def main():
config.algorithm = line[line.index(":")+1:].strip()
if (os.path.isfile(trace_filename)): #Time in milli
runtime_time, task_time, idle_time, scheduling_time = get_times_from_trace_file(trace_filename)
runtime_time, task_time, idle_time, scheduling_time, communication_time = get_times_from_trace_file(trace_filename)
else:
print("File doesn't exist " + trace_filename)
sum_time = (runtime_time + task_time + scheduling_time + communication_time)/(config.num_nodes*config.num_threads)
diff_time = float('%.2f'%(abs(global_time-sum_time)/global_time))
if diff_time > 0.01:
print('\033[31m/!\\Timing Error of ' + str(diff_time) + '\033[39m')
# Write a record to the output file.
output_file.write(config.gen_record(float(global_time),
output_file.write(config.gen_record(global_time,
float(runtime_time),
float(task_time),
float(idle_time),
float(scheduling_time),
float(communication_time),
int(rmem)))
main()
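The consistency check added above compares the measured wall-clock time with the times extracted from the FxT trace: runtime, task, scheduling and communication times are summed over all workers, so dividing by nodes x threads should come close to the wall-clock time of the run (everything in milliseconds, idle time excluded, as in the script). A standalone sketch of the same check; the function and parameter names are illustrative, and the 1% tolerance comes from the script above:

def check_timing(global_time_ms, runtime_ms, task_ms, scheduling_ms,
                 communication_ms, num_nodes, num_threads, tolerance=0.01):
    """Warn when the aggregated trace times disagree with the wall-clock time."""
    # The trace reports times summed over every worker, so the per-worker
    # average should match the wall-clock duration of the run.
    sum_time = (runtime_ms + task_ms + scheduling_ms + communication_ms) \
               / (num_nodes * num_threads)
    diff_time = abs(global_time_ms - sum_time) / global_time_ms
    if diff_time > tolerance:
        print("\033[31m/!\\ Timing error of " + str(round(diff_time, 2)) + "\033[39m")
    return diff_time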
......@@ -3,5 +3,9 @@ cd /home/mkhannou/scalfmm/Doc/noDist/implicit
emacs implicit.org --batch -f org-html-export-to-html --kill
ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; rm -rf implicit"
cd ..
scp -r implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
rsync -e ssh -avz --delete-after implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; chmod og+r implicit -R;"
#Put an up-to-date tarball online
cd /home/mkhannou/scalfmm
./export_tarbal.sh &
#!/bin/bash
# Export a tarball of the mpi_implicit branch and upload it next to the org-mode pages.
mkdir platypus
cd platypus
# Fresh clone of ScalFMM, switched to the mpi_implicit branch.
git clone git+ssh://mkhannou@scm.gforge.inria.fr/gitroot/scalfmm/scalfmm.git >/dev/null 2>&1
cd scalfmm
git checkout mpi_implicit >/dev/null 2>&1
cd ..
# Strip the git history before archiving.
rm -rf scalfmm/.git > /dev/null
tar czf scalfmm.tar.gz scalfmm
scp scalfmm.tar.gz scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/implicit >/dev/null
cd ..
# Remove the temporary working directory.
rm -rf platypus >/dev/null
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=25000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......@@ -52,4 +53,3 @@ cd ..
##Move the result into a directory where all result goes
mv $FINAL_DIR jobs_result
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=12500000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=6250000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=25000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=12500000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......
......@@ -26,6 +26,7 @@ export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=6250000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
export STARPU_COMM_STATS=1
NUMACTL="numactl --interleave=all"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
......@@ -37,7 +38,7 @@ echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE $NUMACTL ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation >> $FINAL_DIR/stdout
#Create argument list for starpu_fxt_tool
cd $FINAL_DIR
......