diff --git a/README.md b/README.md
index 5a2e998fcfe2fb6d3c2f61b37ab62e332aa73467..6cae327e5d3e3c1f259522bcb99b6a08b74d2f10 100644
--- a/README.md
+++ b/README.md
@@ -115,7 +115,7 @@ usage: python3 runtest.py [-h] [-p] [-t [TIMEOUT]] [-m [MEM]] pkg data
 |Short|Long|Default|Description|
 |-|-|-|-|
 |`-p`|`--perf`||perf stat the benchmark and put the result in perflog.txt|
-|`-t [TIMEOUT]`|`--timeout [TIMEOUT]`|`None`|To run the benchmark with a timeout|
+|`-t [TIMEOUT]`|`--timeout [TIMEOUT]`|`None`|To run the benchmark with a timeout. Should be written as `<number><unit>` where `<unit>` may be nothing (implicitly seconds), s, m or h, e.g. 1h|
 |`-m [MEM]`|`--mem [MEM]`|`None`|Maximum amount of memory used. Should be written as `<number><unit>` where `<unit>` may be nothing, K, M or G, e.g. 100M|  
 
 ### [runall.py](/runall.py)
@@ -309,4 +309,4 @@ usage: python3 latextable.py [-h] entries
 
 |Argument|Description|
 |-|-|
-|`table`|A table file|
\ No newline at end of file
+|`table`|A JSON table file (should be generated via [synthesize.py](/synthesize.py))|
\ No newline at end of file
diff --git a/gen.sage b/gen.sage
index b69d068584771506e4a90461d8e02d53a07b4dc7..af15c6cb23f6d752d19bc5d00e7b717c52cd40e7 100644
--- a/gen.sage
+++ b/gen.sage
@@ -1,12 +1,12 @@
 import argparse
 import json
 import os
-from itertools import product
+import itertools
 
-parser = argparse.ArgumentParser(description="a description")
-parser.add_argument("h", choices = ["linear", "newton"], help = "Homotopy type.")
-parser.add_argument("type", nargs = '+', help = "Type of target system, followed by its parameters. It can be dense <d1> ... <dn>, structured <n> <d> <l> or katsura <n>.")
-parser.add_argument("--paths", nargs = '?', default = "all", help = "Number of paths tracked. If not specified, all paths are tracked.")
+parser = argparse.ArgumentParser(description="A sage script that generates examples from different families.")
+parser.add_argument("h", choices = ["linear", "newton"], help = "Homotopy type")
+parser.add_argument("type", nargs = '+', help = "Type of target system, followed by its parameters. It can be dense <d1> ... <dn>, structured <n> <d> <l> or katsura <n>")
+parser.add_argument("--paths", nargs = '?', default = "all", help = "Number of paths tracked. Should be either an positive integer or all")
 args = parser.parse_args()
 
 h_type = args.h
@@ -32,10 +32,11 @@ parameter = "t"
 CP = PolynomialRing(CC, names = variables)
 CPt = PolynomialRing(CC, names = ['t'] + variables)
 
+print("Generating target system ...")
 if f_type == "dense":
     dictlist = [((1 + sum(CP.gens()))^di).dict() for di in degrees]
     for dict in dictlist:
-        for k in dict.keys():
+        for k in dict:
             dict[k] = normalvariate(0, 1) + I*normalvariate(0, 1)
 
     T = [CP(dict) for dict in dictlist]
@@ -51,17 +52,18 @@ elif f_type == "katsura":
 system = []
 fiber = []
 
+print("Generating homotopy and fiber ...")
 if h_type == "linear":
     S = [(normalvariate(0, 1) + I*normalvariate(0, 1))*(p^di - 1) for di, p in zip(degrees, CP.gens())]
     if f_type == "structured":
         system = [f"t*({ft}) + (1 - t)*({fs})" for ft, fs in zip(T, S)]
     else:
-        system = [CPt.gens()[0]*CPt(ft) + (1 - CPt.gens()[0])*CPt(fs) for ft, fs in zip(T, S)]
+        system = [str(CPt.gens()[0]*CPt(ft) + (1 - CPt.gens()[0])*CPt(fs)) for ft, fs in zip(T, S)]
 
-    exps = [[CC(exp(k*2*CC.gen()*pi/di)) for k in range(di)] for di in degrees]
+    exps = [[str(CC(exp(k*2*CC.gen()*pi/di))) for k in range(di)] for di in degrees]
     
     if args.paths == "all":
-        fiber = list(product(*exps))
+        fiber = list(itertools.product(*exps))
     else:
         n_paths = int(args.paths)
         fiber = [[choice(exp) for exp in exps] for _ in range(n_paths)]
@@ -74,16 +76,13 @@ elif h_type == "newton":
     for z, v in zip(fiber[0], CP.gens()):
         D[str(v)] = z
     
-    T_fiber = [sage_eval(f, D) for f in T]
+    T_fiber = [sage_eval(str(f), D) for f in T]
 
     if f_type == "structured":
         system = [f"{f} - (1 - t)*({c})" for f, c in zip(T, T_fiber)]
     else:
-        system = [CPt(f) - (1 - CPt.gens()[0])*CPt(c) for f, c in zip(T, T_fiber)]
-    
-
-system_str = [str(f) for f in system]
-fiber_str = [[str(zi) for zi in z] for z in fiber]
+        system = [str(CPt(f) - (1 - CPt.gens()[0])*CPt(c)) for f, c in zip(T, T_fiber)]
+    fiber = [[str(zi) for zi in z] for z in fiber]
 
 name = h_type + "_" + '-'.join(args.type) + "_" + args.paths + "-paths"
 
@@ -91,6 +90,8 @@ index = 1
 while os.path.exists(name + "_" + str(index) + ".json"):
     index += 1
 
+print("Writting on test data file ...")
+
 data_file = open("data/" + name + "_" + str(index) + ".json", "w")
-json.dump({"system": system_str, "variables": variables, "parameters": [parameter], "path": [["0.0"], ["1.0"]], "fiber": fiber_str}, data_file, indent = 2)
+json.dump({"system": system, "variables": variables, "parameters": [parameter], "path": [["0.0"], ["1.0"]], "fiber": fiber}, data_file, indent = 2)
 data_file.close()
\ No newline at end of file
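
A note on the data files gen.sage now writes: the `json.dump` call above stores the stringified system and fiber under the keys shown there. The following is only a minimal sketch (not part of the patch) of how such a file could be read back; the file name is the illustrative one used later in this diff.

```python
import json

# Illustrative only: keys mirror the json.dump call in gen.sage
# (system, variables, parameters, path, fiber); the file name is an example.
with open("data/linear_dense-10_all-paths_1.json") as data_file:
    data = json.load(data_file)

print(len(data["system"]), "equations in variables", data["variables"])
print(len(data["fiber"]), "start point(s) in the fiber")
```
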
diff --git a/latextable.py b/latextable.py
index 3ac6da52acac4299e5d8f7e07b23d6db08157b00..85bf292397b031227c0b496c545a0cd276cf5e9c 100644
--- a/latextable.py
+++ b/latextable.py
@@ -3,8 +3,8 @@ import argparse
 import os
 import re
 
-parser = argparse.ArgumentParser()
-parser.add_argument("table", help="A table file")
+parser = argparse.ArgumentParser(description = "A python script to generate a latex output out of a table of results.")
+parser.add_argument("table", help="A JSON table file (should be generated via synthetize.py)")
 args = parser.parse_args()
 
 assert os.path.exists(args.table), f"{args.table} not found"
diff --git a/runall.py b/runall.py
index f4cddf83200ec91156c6bb0cd17593975d553e76..c6e7177bdbfe14c6b58b1e68fe8c55e9d3ffa84b 100644
--- a/runall.py
+++ b/runall.py
@@ -5,8 +5,8 @@ import glob
 
 parser = argparse.ArgumentParser()
 parser.add_argument("-p", "--perf", action = 'store_true', default = False, help = "perf stat the benchmark and put the result in perflog.txt")
-parser.add_argument("-t", "--timeout", nargs = "?", const = 300, help = "to run the benchmark with a timeout. The maximum time is then written in running")
-parser.add_argument("-m", "--mem", nargs = "?", const = 8, help = "maximum amount of memory used (in GB)")
+parser.add_argument("-t", "--timeout", nargs = "?", const = 300, help = "To run the benchmark with a timeout. Should be written as <number><unit> where <unit> may be nothing (this implicitely means seconds), s, m or h, e.g. 1h")
+parser.add_argument("-m", "--mem", nargs = "?", const = 8, help = "Maximum amount of memory used for the different tests. Should be written as <number><unit> where <unit> may be nothing, K, M or G, e.g. 100M")
 parser.add_argument("-n", action = 'store_true', help = "To only run benchmarks not previously done")
 parser.add_argument("-P", "--packages", nargs = "?", const = ",".join([os.path.basename(os.path.splitext(path)[0]) for path in glob.glob("packages/*.sage")]), default = ",".join([os.path.basename(os.path.splitext(path)[0]) for path in glob.glob("packages/*.sage")]), help = "comma separated list of packages to test. Default : %(default)s")
 args = parser.parse_args()
diff --git a/runtest.py b/runtest.py
index 17040c3f676d3966c28b5284c5edf7cc8877136b..c32322465f871e62ee88387e5775cc73dc6cc24e 100644
--- a/runtest.py
+++ b/runtest.py
@@ -7,10 +7,10 @@ import re
 from datetime import datetime
 
 parser = argparse.ArgumentParser(description = "A description")
-parser.add_argument("pkg", help = "the pkg which is tested. Should be pkgs/<pkg_name>.sage")
-parser.add_argument("data", help = "the system which is tested. Shoud be data/<test_name>.json")
+parser.add_argument("pkg", help = "The package which is tested. Should be packages/<pkg_name>.sage e.g. packages/algpath.sage")
+parser.add_argument("data", help = "The system which is tested. Shoud be data/<test_name>.json e.g. data/linear_dense-10_all-paths_1.json")
 parser.add_argument("-p", "--perf", action = 'store_true', default = False, help = "perf stat the benchmark and put the result in perflog.txt")
-parser.add_argument("-t", "--timeout", nargs = "?", const = "300", help = "to run the benchmark with a timeout. The maximum time is then written in running")
+parser.add_argument("-t", "--timeout", nargs = "?", const = "300", help = "To run the benchmark with a timeout. Should be written as <number><unit> where <unit> may be nothing (this implicitely means seconds), s, m or h, e.g. 1h")
 parser.add_argument("-m", "--mem", nargs = "?", const = "8G", help = "maximum amount of memory used. Should be written as <number><unit> where <unit> may be nothing, K, M or G, e.g. 100M")
 args = parser.parse_args()
 
@@ -107,7 +107,7 @@ print("Running script...")
 
 cmd = ["systemd-run", "--scope", "--user", "-p", f"MemoryMax={memory_b}", "-p", "MemorySwapMax=0"]*(memory_b != None) + ["perf", "stat", "-o", "perflog.txt"]*args.perf + ["./command.sh"]
 
-p = subprocess.Popen(cmd, start_new_session = True, stdout = out_file, stderr = log_file)
+p = subprocess.Popen(cmd, stdout = out_file, stderr = log_file)
 try:
     _, _ = p.communicate(timeout = timeout_s)
     log_file.close()