Commit 8ac03d35 authored by Quentin Guilloteau

update to take filesize into account

parent 17bca60e
@@ -14,10 +14,18 @@ FLAVOURS = [
 ]
 NB_NODES = [
-    4,
-    6,
-    8,
-    16
+    #5,
+    9, 17, 25
+    # 13,
+    #17, 21, 33 , 65
+    #, 25
+]
+BLOCK_SIZES = [
+    "1M",
+    "10M",
+    "100M",
+    "1G"
 ]
@@ -25,7 +33,7 @@ rule all:
     input:
         # expand(["nxc/build/composition::{file}"], file=FLAVOURS),
         # expand(["{result_folder}/csv_zip/results_csv_ior_{nb_nodes}_nodes_{flavour}.zip"], result_folder=RESULT_FOLDER, flavour=FLAVOURS, nb_nodes=NB_NODES),
-        expand(["{path_here}/data/json_zip/results_ior_{nb_nodes}_nodes_{flavour}.zip"], path_here=PATH_HERE, flavour=FLAVOURS, nb_nodes=NB_NODES),
+        expand(["{path_here}/data/json_zip/results_ior_{nb_nodes}_nodes_{block_size}_block_size_{flavour}.zip"], path_here=PATH_HERE, flavour=FLAVOURS, nb_nodes=NB_NODES, block_size=BLOCK_SIZES),
         # expand(["{result_folder}/csv/{nb_nodes}_{flavour}.csv"], result_folder=RESULT_FOLDER, flavour=FLAVOURS, nb_nodes=NB_NODES)
         # expand(["data/repeat/iter_{iter}/csv_zip/results_csv_ior_{nb_nodes}_nodes_{flavour}.zip"], iter=[1, 2, 3, 4, 5], nb_nodes=8, flavour=FLAVOURS),
         # expand(["data/repeat/iter_{iter}/csv/{nb_nodes}_{flavour}.csv"], iter=[1, 2, 3, 4, 5], nb_nodes=8, flavour=FLAVOURS),
@@ -50,9 +58,9 @@ rule run_ior:
     input:
         "nxc/build/composition::{flavour}"
     output:
-        "{PATH_HERE}/data/json_zip/results_ior_{nb_nodes}_nodes_{flavour}.zip"
+        "{PATH_HERE}/data/json_zip/results_ior_{nb_nodes}_nodes_{block_size}_block_size_{flavour}.zip"
     shell:
-        "cd nxc; nix develop --command python3 script.py --nxc_build_file {PATH_HERE}/{input} --nb_nodes {wildcards.nb_nodes} --result_dir {RESULT_FOLDER} --flavour {wildcards.flavour} --outfile {output} --walltime 2"
+        "cd nxc; nix develop --command python3 script.py --nxc_build_file {PATH_HERE}/{input} --nb_nodes {wildcards.nb_nodes} --block_size {wildcards.block_size} --result_dir {RESULT_FOLDER} --flavour {wildcards.flavour} --outfile {output} --walltime 5"
 rule json_to_csv:
     input:
...
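Note on the new targets: Snakemake's expand() builds the Cartesian product of the lists it is given, so rule all now requests one zip archive per (nb_nodes, block_size, flavour) combination. A minimal sketch of the resulting target list in plain Python, for illustration only; PATH_HERE and the FLAVOURS values below are placeholders, since the real list is collapsed in this diff:

from itertools import product

PATH_HERE = "/path/to/repo"                  # placeholder
FLAVOURS = ["nfs", "nfs-tmpfs"]              # hypothetical flavours, not shown in the hunk
NB_NODES = [9, 17, 25]
BLOCK_SIZES = ["1M", "10M", "100M", "1G"]

# One target per combination, mirroring the new expand() pattern in rule all.
targets = [
    f"{PATH_HERE}/data/json_zip/results_ior_{n}_nodes_{b}_block_size_{f}.zip"
    for n, b, f in product(NB_NODES, BLOCK_SIZES, FLAVOURS)
]
print(len(targets))  # 3 node counts * 4 block sizes * 2 flavours = 24 archives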
@@ -41,6 +41,6 @@ in {
       cat /etc/hosts | grep node | head -n $NB_NODES | awk -v nb_slots="$NB_SLOTS_PER_NODE" '{ print $2 " slots=" nb_slots;}' > my_hosts
-      mpirun --allow-run-as-root --oversubscribe -mca btl self,vader -np $TOTAL_NB_NODES --hostfile my_hosts ior -f ${iorConfigPerCluster}
+      mpirun --allow-run-as-root --oversubscribe --mca btl ^openib -np $TOTAL_NB_NODES --hostfile my_hosts ior -f ${iorConfigPerCluster}
     '';
 }
@@ -15,6 +15,7 @@ class MyEngine(Engine):
         parser = self.args_parser
         parser.add_argument('--nxc_build_file', help='Path to the NXC deploy file')
         parser.add_argument('--nb_nodes', help='Number of nodes')
+        parser.add_argument('--block_size', help='Size of the file to write')
         parser.add_argument('--walltime', help='walltime in hours')
         parser.add_argument('--result_dir', help='where to store results')
         parser.add_argument('--flavour', help='Flavour')
@@ -31,8 +32,10 @@ class MyEngine(Engine):
         self.flavour = self.args.flavour if self.args.flavour else "g5k-image"
-        site = "grenoble"
-        cluster = "dahu"
+        site = "nancy"
+        cluster = "gros"
+        # site = "grenoble"
+        # cluster = "dahu"
         oar_job = reserve_nodes(self.nb_nodes, site, cluster, "deploy" if self.flavour == "g5k-image" else "allow_classic_ssh", walltime=walltime_hours*60*60)
         self.oar_job_id, site = oar_job[0]
@@ -49,15 +52,16 @@ class MyEngine(Engine):
     def run(self):
         result_dir = self.args.result_dir if self.args.result_dir else os.getcwd()
-        zip_archive_name = f"{result_dir}/results_ior_{self.nb_nodes}_nodes_{self.flavour}"
+        block_size = self.args.block_size if self.args.block_size else "1G"
+        zip_archive_name = f"{result_dir}/results_ior_{self.nb_nodes}_nodes_{block_size}_block_size_{self.flavour}"
         outfile = self.args.outfile[:-4] if self.args.outfile else zip_archive_name
-        folder_name = f"{result_dir}/expe_nfs_{self.flavour}_{self.nb_nodes}"
+        folder_name = f"{result_dir}/expe_nfs_{self.flavour}_{self.nb_nodes}_{block_size}"
         create_folder(folder_name)
         logger.info("Generating IOR config")
-        run_ior_config_remote = Remote(f"generate_ior_config {self.nb_nodes - 1}", self.nodes["node"][0], connection_params={'user': 'root'})
+        run_ior_config_remote = Remote(f"generate_ior_config {self.nb_nodes - 1} {block_size}", self.nodes["node"][0], connection_params={'user': 'root'})
         run_ior_config_remote.run()
         for nb_node in range(self.nb_nodes - 1, 0, -1):
@@ -86,7 +90,7 @@ class MyEngine(Engine):
             remove_folder(folder_name)
         logger.info(f"Giving back the resources")
-        oardel([(self.oar_job_id, "grenoble")])
+        oardel([(self.oar_job_id, "nancy")])
 def reserve_nodes(nb_nodes, site, cluster, job_type, walltime=3600):
     jobs = oarsub([(OarSubmission("{{cluster='{}'}}/nodes={}".format(cluster, nb_nodes), walltime, job_type=[job_type]), site)])
...
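Note on the CLI change: --block_size is optional and run() falls back to "1G" when the flag is absent, so existing invocations keep working; the value is then embedded in the archive and folder names and forwarded as the second argument of generate_ior_config on the first node. A small standalone sketch of just this default/naming logic (the "nfs" flavour below is a placeholder):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--nb_nodes', help='Number of nodes')
parser.add_argument('--block_size', help='Size of the file to write')
args = parser.parse_args(['--nb_nodes', '9'])               # no --block_size given

block_size = args.block_size if args.block_size else "1G"   # same fallback as in run()
zip_archive_name = f"results_ior_{args.nb_nodes}_nodes_{block_size}_block_size_nfs"
print(zip_archive_name)  # results_ior_9_nodes_1G_block_size_nfs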
@@ -40,8 +40,8 @@ IOR START
 repetitions=5
 numTasks=${builtins.toString numTasks}
-segmentCount=4
-blockSize=128M
+segmentCount=1
+blockSize=1G
 transferSize=4M
 summaryFile=/data/results_ior.json
...
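Note on the IOR template change: as far as I understand IOR's parameters, each task writes segmentCount * blockSize bytes in total (transferSize only sets the size of individual I/O requests), so moving from segmentCount=4 / blockSize=128M to segmentCount=1 / blockSize=1G doubles the default per-task volume (512M before, 1G after) and makes the volume written track blockSize directly, which the new block_size argument presumably overrides at run time via generate_ior_config. A rough sketch of the data volume per repetition under these assumptions (numTasks = nb_nodes - 1 is inferred from the generate_ior_config call; node counts and sizes come from the updated Snakefile):

UNITS = {"M": 1 << 20, "G": 1 << 30}

def to_bytes(size: str) -> int:
    # "100M" -> 100 * 2**20, "1G" -> 2**30
    return int(size[:-1]) * UNITS[size[-1]]

segment_count = 1
for nb_nodes in [9, 17, 25]:                 # node counts from the updated NB_NODES list
    num_tasks = nb_nodes - 1                 # assumption: one node acts as the file server
    for block_size in ["1M", "10M", "100M", "1G"]:
        total = num_tasks * segment_count * to_bytes(block_size)
        print(f"{nb_nodes:>2} nodes, blockSize={block_size:>4}: "
              f"{total / (1 << 30):.3f} GiB written per repetition")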