diff --git a/scripts/heatmap.py b/scripts/heatmap.py
index 210032f0859c5678d6d0a3c9b5d730905dfd4f43..d72595d6a32aa988e3e468b906879a797d7edf87 100755
--- a/scripts/heatmap.py
+++ b/scripts/heatmap.py
@@ -1,6 +1,7 @@
 #! /usr/bin/env python3
 
 import argparse
+from io import TextIOWrapper
 import subprocess
 import itertools
 from collections.abc import Callable, Iterable
@@ -37,6 +38,41 @@ class CollectorCSV:
         products = itertools.product(*tmp)
         return ({k: v for k, v in prod} for prod in products)
 
+    def bench(self, f: TextIOWrapper) -> None:
+        assert self.measurements
+        csv_cols = list(self.parameters) + self.measurements
+        f.write(",".join(csv_cols) + "\n")
+        for param in self.params_product():
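+            # pin the benchmark to core 0 so the scheduler does not migrate it between CPUs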
+            out = subprocess.run(
+                [
+                    "taskset",
+                    "-c",
+                    "0",
+                    f"target/{self.profile}/examples/overhead",
+                    "--duration-task",
+                    str(param["DurationTask"]),
+                    "--reactivity-us",
+                    str(param["Reactivity"]),
+                    "--n-iter",
+                    str(param["N_Iter"]),
+                    "--repeat",
+                    str(self.repeat),
+                    "--output-ns",
+                ],
+                check=True,
+                text=True,
+                stdout=subprocess.PIPE,
+            )
+            assert out.stdout
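+            # for every measurement name, stdout contains one "<name>: <value>" entry per repeat; walk each series in order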
+            iterators = [
+                re.finditer(f"{k}: (\\d+)", out.stdout) for k in self.measurements
+            ]
+            params = ",".join(map(str, param.values()))
+            for _ in range(self.repeat):
+                meas = ",".join(next(it).group(1) for it in iterators)
+                f.write(f"{params},{meas}\n")
+            print(f"{param=} done")
+
     def collect(self) -> None:
         if os.path.exists(self.out_csv):
             user_input = input("Remove old csv (y/n) ?")
@@ -49,38 +85,12 @@ class CollectorCSV:
             ["cargo", "build", "--profile", self.profile, "--example", "overhead"],
             check=True,
         )
-        assert self.measurements
-        csv_cols = list(self.parameters) + self.measurements
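+        # scripts/setup_bench.py sets the governor to "performance"; warn if that has not been done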
+        with open("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r") as f:
+            if f.read().strip() != "performance":
+                print("WARNING: CPU scaling is enabled measurement will be noisy")
+        print(self)
         with open(self.out_csv, "w") as f:
-            f.write(",".join(csv_cols) + "\n")
-            # this should be in the same order than declared since dict garanties this
-            for param in self.params_product():
-                out = subprocess.run(
-                    [
-                        f"target/{self.profile}/examples/overhead",
-                        "--duration-task",
-                        str(param["DurationTask"]),
-                        "--reactivity-us",
-                        str(param["Reactivity"]),
-                        "--n-iter",
-                        str(param["N_Iter"]),
-                        "--repeat",
-                        str(self.repeat),
-                        "--output-ns",
-                    ],
-                    check=True,
-                    text=True,
-                    stdout=subprocess.PIPE,
-                )
-                assert out.stdout
-                iterators = [
-                    re.finditer(f"{k}: (\\d+)", out.stdout) for k in self.measurements
-                ]
-                params = ",".join(map(str, param.values()))
-                for _ in range(self.repeat):
-                    meas = ",".join(next(it).group(1) for it in iterators)
-                    f.write(f"{params},{meas}\n")
-                print(f"{param=} done")
+            self.bench(f)
 
 
 def plot_heatmap(path: str, rate: float):
@@ -137,7 +147,9 @@ def get_overhead(base: np.ndarray, other: np.ndarray) -> np.ndarray:
     overhead = overhead[filter_arr]
     # sort for hist and cdf purposes
     overhead.sort()
-    print(len(overhead))
+    print(
+        f"n_sample {len(overhead)} avg {np.average(overhead):.2f} std {np.std(overhead):.2f}"
+    )
     return overhead
 
 
diff --git a/scripts/setup_bench.py b/scripts/setup_bench.py
index d64bd052c424866373631c4613ec1c5d215cb06c..02adc2525ddba1dfc595399f5772697aafb564ca 100755
--- a/scripts/setup_bench.py
+++ b/scripts/setup_bench.py
@@ -2,20 +2,61 @@
 
 import os
 
+N_THREADS = os.cpu_count()
+assert N_THREADS
+CPU_DIR = "/sys/devices/system/cpu"
 
-def main():
-    nb_threads = os.sysconf("SC_NPROCESSORS_CONF")
-    print(f"Setting maximum frequency to all {nb_threads=} Cores.")
-    for i in range(nb_threads):
-        path = f"/sys/devices/system/cpu/cpu{i}/cpufreq/scaling_governor"
-        with open(path, "w") as scaling_gov:
+# most of these tips come from https://github.com/google/benchmark/blob/main/docs/reducing_variance.md
+# and from https://llvm.org/docs/Benchmarking.html
+
+
+def disable_randomize_virtual_addr() -> None:
+    print("Disabling Randomization of Virtual Adress Space")
+    with open("/proc/sys/kernel/randomize_va_space", "w") as f:
+        f.write("0")
+
+
+def disable_freq_scaling() -> None:
+    print("Disabling frequency scaling")
+    for i in range(N_THREADS):
+        with open(f"{CPU_DIR}/cpu{i}/cpufreq/scaling_governor", "w") as scaling_gov:
             scaling_gov.write("performance")
-    print("Successfully set max frequency to all CPUs")
 
-    print("Disabling turbo")
-    with open("/sys/devices/system/cpu/intel_pstate/no_turbo", "w") as no_turbo:
-        no_turbo.write("1")
-    print("Successfully disabled turbo boost")
+
+def disable_intel_turbo() -> None:
+    intel_turbo_path = f"{CPU_DIR}/intel_pstate/no_turbo"
+    if not os.path.exists(intel_turbo_path):
+        return
+    print("Disabling turbo for Intel CPU")
+    with open(intel_turbo_path, "w") as f:
+        f.write("1")
+
+
+def disable_hyperthreading() -> None:
+    print("Disabling SMT i.e hyperthreading")
+    for i in range(N_THREADS):
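+        # thread_siblings_list holds the logical CPUs sharing this core (assumed comma-separated, e.g. "0,4")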
+        sibling_file = f"{CPU_DIR}/cpu{i}/topology/thread_siblings_list"
+        if not os.path.exists(sibling_file):
+            continue
+        with open(sibling_file, "r") as f:
+            siblings = f.read().strip()
+        sibling_list = [int(s) for s in siblings.split(",")]
+        # skip the first entry, assuming it is the primary CPU of the core
+        for sibling in sibling_list[1:]:
+            online_file = f"{CPU_DIR}/cpu{sibling}/online"
+            if os.path.exists(online_file):
+                with open(online_file, "w") as of:
+                    of.write("0")
+
+
+def main():
+    try:
+        disable_freq_scaling()
+        disable_intel_turbo()
+        disable_randomize_virtual_addr()
+        disable_hyperthreading()
+    except PermissionError:
+        print("Try to re run this script using sudo!")
+        exit(1)
 
 
 if __name__ == "__main__":