Commit bcf5e113 authored by Martin Khannouz's avatar Martin Khannouz Committed by Berenger Bramas

Add job scripts, a script to plot graphs, and org-mode documentation.

parent 8c45159f
#!/usr/bin/python
import getopt
import sys
import os
import subprocess
import re


class ScalFMMConfig(object):
    num_threads = 1
    num_nodes = 1
    algorithm = "implicit"
    model = "cube"
    num_particules = 10000
    height = 4
    bloc_size = 100
    order = 5

    def show(self):
        print("=== Simulation parameters ===")
        print("Number of nodes: " + str(self.num_nodes))
        print("Number of threads: " + str(self.num_threads))
        print("Model: " + str(self.model))
        print("Number of particules: " + str(self.num_particules))
        print("Height: " + str(self.height))
        print("Bloc size: " + str(self.bloc_size))
        print("Order: " + str(self.order))

    def gen_header(self):
        columns = [
            "model",
            "algo",
            "nnode",
            "nthreads",
            "npart",
            "height",
            "bsize",
            "global_time",
            "runtime_time",
            "task_time",
            "idle_time",
            "scheduling_time",
            "communication_time",
            "rmem",
        ]
        header = ""
        for i in range(len(columns)):
            if i != 0:
                header += ","
            header += "\"" + columns[i] + "\""
        header += "\n"
        return header

    def gen_record(self, global_time, runtime_time, task_time,
                   idle_time, scheduling_time, rmem):
        columns = [
            self.model,
            self.algorithm,
            self.num_nodes,
            self.num_threads,
            self.num_particules,
            self.height,
            self.bloc_size,
            global_time,
            runtime_time,
            task_time,
            idle_time,
            scheduling_time,
            0.0,  # communication_time: not measured yet
            rmem,
        ]
        record = ""
        for i in range(len(columns)):
            if i != 0:
                record += ","
            # Quote strings (and booleans) so the CSV stays parseable.
            if isinstance(columns[i], (bool, str)):
                record += "\"" + str(columns[i]) + "\""
            else:
                record += str(columns[i])
        record += "\n"
        return record


def get_times_from_trace_file(filename):
    # starpu_trace_state_stats.py prints per-state statistics as CSV.
    cmd = "starpu_trace_state_stats.py " + filename
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        sys.exit("FATAL: Failed to parse " + filename + "!")
    task_time = 0.0
    idle_time = 0.0
    runtime_time = 0.0
    scheduling_time = 0.0
    for line in stdout.decode().splitlines():
        arr = line.replace("\"", "").split(",")
        if arr[0] == "Name":  # skip the CSV header
            continue
        if len(arr) >= 4:
            if arr[2] == "Runtime":
                if arr[0] == "Scheduling":
                    scheduling_time = float(arr[3])
                else:
                    runtime_time = float(arr[3])
            elif arr[2] == "Task":
                task_time += float(arr[3])
            elif arr[2] == "Other":
                idle_time = float(arr[3])
    return runtime_time, task_time, idle_time, scheduling_time


def main():
    output_trace_file = ""
    trace_filename = "trace.rec"
    output_filename = "loutre.db"
    long_opts = ["help",
                 "trace-file=",
                 "output-trace-file=",
                 "output-file="]
    opts, args = getopt.getopt(sys.argv[1:], "ht:i:o:", long_opts)
    for o, a in opts:
        if o in ("-h", "--help"):
            print("usage: " + sys.argv[0] +
                  " [-t trace.rec] [-i job-stdout-file] [-o output.db]")
            sys.exit()
        elif o in ("-t", "--trace-file"):
            trace_filename = str(a)
        elif o in ("-i", "--output-trace-file"):
            output_trace_file = str(a)
        elif o in ("-o", "--output-file"):
            output_filename = str(a)
        else:
            assert False, "unhandled option"

    config = ScalFMMConfig()
    rmem = 0
    global_time = 0.0
    runtime_time = 0.0
    task_time = 0.0
    idle_time = 0.0
    scheduling_time = 0.0

    # Append to the database if it already exists; otherwise create it
    # and write the CSV header first.
    if os.path.isfile(output_filename):
        output_file = open(output_filename, "a")
    else:
        output_file = open(output_filename, "w")
        output_file.write(config.gen_header())

    # Extract the simulation parameters from the job's stdout file.
    with open(output_trace_file, "r") as ins:
        for line in ins:
            if re.search("Average", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    global_time = a[0]
            elif re.search("Total particles", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    config.num_particules = int(a[0])
            elif re.search("Group size", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    config.bloc_size = int(a[0])
            elif re.search("Nb node", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    config.num_nodes = int(a[0])
            elif re.search("Tree height", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    config.height = int(a[0])
            elif re.search("Nb thread", line):
                a = re.findall(r"[-+]?\d*\.\d+|\d+", line)
                if len(a) == 1:
                    config.num_threads = int(a[0])
            elif re.search("Model", line):
                config.model = line[line.index(":") + 1:].strip()
            elif re.search("Algorithm", line):
                config.algorithm = line[line.index(":") + 1:].strip()

    # Times reported by starpu_trace_state_stats.py are in milliseconds.
    if os.path.isfile(trace_filename):
        runtime_time, task_time, idle_time, scheduling_time = \
            get_times_from_trace_file(trace_filename)
    else:
        print("File doesn't exist: " + trace_filename)

    # Write a record to the output file.
    output_file.write(config.gen_record(float(global_time),
                                        float(runtime_time),
                                        float(task_time),
                                        float(idle_time),
                                        float(scheduling_time),
                                        int(rmem)))
    output_file.close()


main()
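# Usage sketch for the parser above, not part of the commit. All file
# names here are assumptions: parse_job.py is a hypothetical name for
# the Python script (its real filename is not shown in this diff) and
# dir_12345 stands for one jobs_result directory produced by the job
# scripts below. The -t/-i/-o options are the ones the script defines.
./parse_job.py -t jobs_result/dir_12345/trace.rec \
               -i jobs_result/dir_12345/stdout \
               -o loutre.db
# Each run appends one CSV record to loutre.db; the first run also
# writes the quoted header (model, algo, nnode, ...).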
#!/bin/bash
cd /home/mkhannou/scalfmm/Doc/noDist/implicit
emacs implicit.org --batch -f org-html-export-to-html --kill
ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; rm -rf implicit"
cd ..
scp -r implicit scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/
ssh scm.gforge.inria.fr "cd /home/groups/scalfmm/htdocs/orgmode/; chmod -R og+r implicit;"
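# Alternative publish sketch, not what the commit does: a single rsync
# call can replace the ssh+rm, scp, and ssh+chmod trio above (assumes
# rsync is installed on both ends). --delete drops stale remote files;
# --chmod applies the og+r permissions, with X keeping directories
# traversable.
cd /home/mkhannou/scalfmm/Doc/noDist
rsync -a --delete --chmod=og+rX implicit \
    scm.gforge.inria.fr:/home/groups/scalfmm/htdocs/orgmode/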
#!/usr/bin/env bash
## name of job
#SBATCH -J explicit_50M_10N
#SBATCH -p special
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 10
#SBATCH -c 24
## standard output message
#SBATCH -o explicit_50M_10N%j.out
#SBATCH --time=00:30:00
## output error message
#SBATCH -e explicit_50M_10N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
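# Post-processing sketch, not part of the commit: the raw FxT files
# gathered in fxt/ above still have to be folded into a trace.rec
# before the Python parser can read them. starpu_fxt_tool ships with
# StarPU and writes trace.rec into the current directory; parse_job.py
# is the same hypothetical name used in the sketch after the parser.
cd jobs_result/dir_$SOME_JOB_ID   # $SOME_JOB_ID: placeholder job id
starpu_fxt_tool $(for f in fxt/*; do printf -- '-i %s ' "$f"; done)
parse_job.py -t trace.rec -i stdout -o loutre.db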
#!/usr/bin/env bash
## name of job
#SBATCH -J explicit_50M_1N
#SBATCH -p defq
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 1
#SBATCH -c 24
#SBATCH --time=02:00:00
## output error message
#SBATCH -e explicit_50M_1N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J explicit_50M_2N
#SBATCH -p court
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 2
#SBATCH -c 24
#SBATCH --time=04:00:00
## output error message
#SBATCH -e explicit_50M_2N_%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=25000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J explicit_50M_4N
#SBATCH -p court
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 4
#SBATCH -c 24
#SBATCH --time=04:00:00
## output error message
#SBATCH -e explicit_50M_4N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=12500000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J explicit_50M_8N
#SBATCH -p special
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 8
#SBATCH -c 24
#SBATCH --time=00:30:00
## output error message
#SBATCH -e explicit_50M_8N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=6250000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: explicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedMpiChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
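# Note on the five explicit jobs above: the total stays at 50M
# particles while NB_PARTICLE_PER_NODE is hand-scaled per node count
# (50M, 25M, 12.5M, 6.25M, 5M for 1, 2, 4, 8, 10 nodes). A sketch of
# deriving the value instead of hard-coding it in every script:
TOTAL_PARTICLES=50000000
export NB_PARTICLE_PER_NODE=$(($TOTAL_PARTICLES / $SLURM_JOB_NUM_NODES))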
#!/usr/bin/env bash
## name of job
#SBATCH -J implicit_50M_10N
#SBATCH -p special
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 10
#SBATCH -c 24
## standard output message
#SBATCH -o implicit_50M_10N%j.out
#SBATCH --time=00:30:00
## output error message
#SBATCH -e implicit_50M_10N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=5000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J implicit_50M_1N
#SBATCH -p defq
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 1
#SBATCH -c 24
#SBATCH --time=02:00:00
## output error message
#SBATCH -e implicit_50M_1N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=50000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J implicit_50M_2N
#SBATCH -p special
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 2
#SBATCH -c 24
#SBATCH --time=00:30:00
## output error message
#SBATCH -e implicit_50M_2N_%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=25000000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job
#SBATCH -J implicit_50M_4N
#SBATCH -p special
## Resources (nodes, procs, tasks, walltime, etc.)
#SBATCH -N 4
#SBATCH -c 24
#SBATCH --time=00:30:00
## output error message
#SBATCH -e implicit_50M_4N%j.err
#SBATCH --mail-type=END,FAIL,TIME_LIMIT --mail-user=martin.khannouz@inria.fr
## modules to load for the job
module purge
module load slurm
module add compiler/gcc/5.3.0 tools/module_cat/1.0.0 intel/mkl/64/11.2/2016.0.0
. /home/mkhannou/spack/share/spack/setup-env.sh
spack load fftw
spack load hwloc
spack load openmpi
spack load starpu@svn-trunk+fxt
## variable for the job
export GROUP_SIZE=500
export TREE_HEIGHT=8
export NB_NODE=$SLURM_JOB_NUM_NODES
export STARPU_NCPU=24
export NB_PARTICLE_PER_NODE=12500000
export STARPU_FXT_PREFIX=$SLURM_JOB_ID
export FINAL_DIR="`pwd`/dir_$SLURM_JOB_ID"
mkdir $FINAL_DIR
echo "my jobID: " $SLURM_JOB_ID > $FINAL_DIR/stdout
echo "Model: cube" >> $FINAL_DIR/stdout
echo "Nb node: " $NB_NODE >> $FINAL_DIR/stdout
echo "Nb thread: " $STARPU_NCPU >> $FINAL_DIR/stdout
echo "Tree height: " $TREE_HEIGHT >> $FINAL_DIR/stdout
echo "Group size: " $GROUP_SIZE >> $FINAL_DIR/stdout
echo "Algorithm: implicit" >> $FINAL_DIR/stdout
echo "Particle per node: " $NB_PARTICLE_PER_NODE >> $FINAL_DIR/stdout
echo "Total particles: " $(($NB_PARTICLE_PER_NODE*$NB_NODE)) >> $FINAL_DIR/stdout
mpiexec -n $NB_NODE ./Build/Tests/Release/testBlockedImplicitChebyshev -nb $NB_PARTICLE_PER_NODE -bs $GROUP_SIZE -h $TREE_HEIGHT -no-validation | grep Average >> $FINAL_DIR/stdout
# List the FxT trace files so starpu_fxt_tool can be run on them later
cd $FINAL_DIR
list_fxt_file=`ls ../$STARPU_FXT_PREFIX*`
# Gather the raw traces in one place; only trace.rec is kept in the end
mkdir fxt
for i in $list_fxt_file; do
	mv $i fxt
done
cd ..
## Move the results into the directory where all job results go
mv $FINAL_DIR jobs_result
#!/usr/bin/env bash
## name of job