diff --git a/README.md b/README.md
index a67c26213161bdc27a0b1a329bfb4c675b736583..b885ca535753b318b0ec6f6bf2bd97b5460f8b22 100644
--- a/README.md
+++ b/README.md
@@ -62,3 +62,23 @@ More functions in API:
 - `ge, gt, le, lt, eq, ne` ?
 - `min, max, min_by, max_by, min_by_key, max_by_key` ?
 - `product, sum, reduce` ?
+
+Micro-benchmark measurement weirdness:
+
+- Preemptive2 seems stable at around 16% overhead in both release and release-with-debug
+- Preemptive is unstable: 3% in release but 45% in release-with-debug
+- TODO: keep only Preemptive1 and try with/without debug, then inspect and compare
+  the asm; maybe insert a call to an `#[inline(never)]` function to make the
+  comparison easier (see the sketch below)
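+
+A possible shape for that probe (hypothetical sketch; `probe` is not in the crate):
+
+```rust
+// An #[inline(never)] function forces an actual `call` instruction in the
+// generated asm, giving a stable landmark for comparing the two builds.
+#[inline(never)]
+fn probe(x: u64) -> u64 {
+    // best-effort barrier so the argument and result are not optimized away
+    std::hint::black_box(x)
+}
+```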
diff --git a/examples/overhead.rs b/examples/overhead.rs
index b9f5a176c844de92009603f19b3c9b2ce7175d93..686103ffaee616bdfacdb7d50e751a35ff911459 100644
--- a/examples/overhead.rs
+++ b/examples/overhead.rs
@@ -8,9 +8,11 @@ use std::ops::Range;
 macro_rules! timeof {
     ( $function:expr, $( $arg:expr ),*) => {
         {
-        let start = std::time::Instant::now();
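+        // best-effort barrier (std::hint::black_box is only a hint): keeps the
+        // compiler from folding or moving the timestamps across the measured call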
+        let start = std::hint::black_box(std::time::Instant::now());
         std::hint::black_box($function($(std::hint::black_box($arg),)*));
-        start.elapsed()
+        std::hint::black_box(start.elapsed())
         }
     };
 }
@@ -19,9 +21,9 @@ macro_rules! timeof {
 macro_rules! timeofasync {
     ( $function:expr, $( $arg:expr ),*) => {
         embassy_futures::block_on(async {
-            let start = std::time::Instant::now();
+            let start = std::hint::black_box(std::time::Instant::now());
             std::hint::black_box($function($(std::hint::black_box($arg),)*)).await;
-            start.elapsed()
+            std::hint::black_box(start.elapsed())
         })
     };
 }
@@ -55,15 +57,15 @@ struct Args {
 fn main() {
     let args = Args::parse();
     for _ in 0..args.repeat {
-        let time_ref = timeof!(Range::for_each, 0..args.n_iter, |_| work(
-            args.duration_task
-        ));
         let time_auto = timeofasync!(
             Range::preemptive_for_each,
             0..args.n_iter,
             |_| work(args.duration_task),
             Duration::from_micros(args.reactivity_us)
         );
+        let time_ref = timeof!(Range::for_each, 0..args.n_iter, |_| work(
+            args.duration_task
+        ));
         let time_manual = timeofasync!(
             Range::preemptive_for_each_fixed_block,
             0..args.n_iter,
diff --git a/scripts/heatmap.py b/scripts/heatmap.py
index d7600ef4d9deb77c7b845efaa68a52b94cb8b0ba..210032f0859c5678d6d0a3c9b5d730905dfd4f43 100755
--- a/scripts/heatmap.py
+++ b/scripts/heatmap.py
@@ -3,89 +3,100 @@
 import argparse
 import subprocess
 import itertools
-from collections.abc import Callable
+from collections.abc import Callable, Iterable
 from bisect import bisect_left
 import os
 import re
+from dataclasses import dataclass
 import pandas
 
 import matplotlib.pyplot as plt
 import numpy as np
 
-PARAMETERS = {
+DEFAULT_PARAMETERS = {
     "Reactivity": [10],
-    "DurationTask": [1, 10, 100, 1_000],
+    "DurationTask": [1, 10, 100],
     "N_Iter": [100, 1_000, 10_000],
 }
-CSV_COLS = [
-    "Reactivity",
-    "DurationTask",
-    "N_Iter",
-    "Baseline",
-    "Preemptive",
-    "PreemptiveFixed",
-    "Preemptive2",
-]
-N_REPEAT = 1000
-
-EXEC = "target/release/examples/overhead"
-REGEX_BASELINE = re.compile(r"Baseline: (\d+)")
-REGEX_PREEMPT = re.compile(r"Preemptive: (\d+)")
-REGEX_PREEMPT2 = re.compile(r"PreemptiveFixed: (\d+)")
-REGEX_PREEMPT3 = re.compile(r"Preemptive2: (\d+)")
-
-
-def collect_heatmap(out_csv: str, repeat: int):
-    if os.path.exists(out_csv):
-        user_input = input("Remove old csv (y/n) ?")
-        if user_input.startswith("y"):
-            os.remove(out_csv)
-        else:
-            print("Do nothing")
-            return
-    with open(out_csv, "w") as f:
-        f.write(",".join(CSV_COLS) + "\n")
-        # this should be in the same order than declared since dict garanties this
-        for reac, dur, niter in itertools.product(*PARAMETERS.values()):
-            out = subprocess.run(
-                [
-                    EXEC,
-                    "--duration-task",
-                    str(dur),
-                    "--reactivity-us",
-                    str(reac),
-                    "--n-iter",
-                    str(niter),
-                    "--repeat",
-                    str(repeat),
-                    "--output-ns",
-                ],
-                check=True,
-                text=True,
-                stdout=subprocess.PIPE,
-            )
-            assert out.stdout
-            baselines = re.finditer(REGEX_BASELINE, out.stdout)
-            premptives = re.finditer(REGEX_PREEMPT, out.stdout)
-            premptives2 = re.finditer(REGEX_PREEMPT2, out.stdout)
-            premptives3 = re.finditer(REGEX_PREEMPT3, out.stdout)
-            for cb, cp, cp2, cp3 in zip(
-                baselines, premptives, premptives2, premptives3
-            ):
-                f.write(
-                    f"{reac},{dur},{niter},{cb.group(1)},{cp.group(1)},{cp2.group(1)},{cp3.group(1)}\n"
+DEFAULT_MEASUREMENTS = ["Baseline", "Preemptive", "PreemptiveFixed", "Preemptive2"]
+DEFAULT_REPEAT = 1000
+
+
+@dataclass
+class CollectorCSV:
+    out_csv: str
+    repeat: int
+    parameters: dict[str, list[int]]
+    measurements: list[str]
+    profile: str
+
+    def params_product(self) -> Iterable[dict[str, int]]:
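+        # e.g. {"A": [1, 2], "B": [3]} yields {"A": 1, "B": 3} then {"A": 2, "B": 3}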
+        tmp: list[list[tuple[str, int]]] = [
+            [(name, v) for v in values] for name, values in self.parameters.items()
+        ]
+        products = itertools.product(*tmp)
+        return (dict(prod) for prod in products)
+
+    def collect(self) -> None:
+        if os.path.exists(self.out_csv):
+            user_input = input("Remove old csv (y/n)? ")
+            if user_input.startswith("y"):
+                os.remove(self.out_csv)
+            else:
+                print("Do nothing")
+                return
+        subprocess.run(
+            ["cargo", "build", "--profile", self.profile, "--example", "overhead"],
+            check=True,
+        )
+        assert self.measurements
+        csv_cols = list(self.parameters) + self.measurements
+        with open(self.out_csv, "w") as f:
+            f.write(",".join(csv_cols) + "\n")
+            # parameter order matches csv_cols since dicts preserve insertion order
+            for param in self.params_product():
+                out = subprocess.run(
+                    [
+                        f"target/{self.profile}/examples/overhead",
+                        "--duration-task",
+                        str(param["DurationTask"]),
+                        "--reactivity-us",
+                        str(param["Reactivity"]),
+                        "--n-iter",
+                        str(param["N_Iter"]),
+                        "--repeat",
+                        str(self.repeat),
+                        "--output-ns",
+                    ],
+                    check=True,
+                    text=True,
+                    stdout=subprocess.PIPE,
                 )
-            print(f"{reac=} {dur=} {niter=} done")
+                assert out.stdout
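+                # one finditer per measurement column; each yields one match per repeated run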
+                iterators = [
+                    re.finditer(f"{k}: (\\d+)", out.stdout) for k in self.measurements
+                ]
+                params = ",".join(map(str, param.values()))
+                for _ in range(self.repeat):
+                    meas = ",".join(next(it).group(1) for it in iterators)
+                    f.write(f"{params},{meas}\n")
+                print(f"{param=} done")
 
 
 def plot_heatmap(path: str, rate: float):
     df = pandas.read_csv(path)
     print(df)
     fig, ax = plt.subplots()
+    niters = df["N_Iter"].unique()
+    durs = df["DurationTask"].unique()
+    reacs = df["Reactivity"].unique()
+    assert len(reacs) == 1  # for now
 
-    data = np.zeros((len(PARAMETERS["N_Iter"]), len(PARAMETERS["DurationTask"])))
-    for i, niter in enumerate(PARAMETERS["N_Iter"]):
-        for j, dur in enumerate(PARAMETERS["DurationTask"]):
+    data = np.zeros((len(niters), len(durs)))
+    for i, niter in enumerate(niters):
+        for j, dur in enumerate(durs):
             df_filtered = df[(df["N_Iter"] == niter) & (df["DurationTask"] == dur)]
             baselines = df_filtered["Baseline"].values
             preemptives = df_filtered["Preemptive"].values
@@ -105,62 +116,64 @@ def plot_heatmap(path: str, rate: float):
     ax.imshow(data)
     fig.tight_layout()
 
-    def settick(set_func: Callable, key: str):
-        set_func(list(range(len(PARAMETERS[key]))), list(map(str, PARAMETERS[key])))
+    def settick(set_func: Callable, arr: np.ndarray):
+        set_func(list(range(len(arr))), list(map(str, arr)))
 
-    settick(ax.set_xticks, "DurationTask")
-    settick(ax.set_yticks, "N_Iter")
+    settick(ax.set_xticks, durs)
+    settick(ax.set_yticks, niters)
     ax.set_xlabel("Duration of each iteration in approx e-7 secs.")
     ax.set_ylabel("Number of iterations.")
     ax.set_title(
-        f"Overhead in % of the iteration using preemptive iter vs native iter. (reactivity = {PARAMETERS['Reactivity'][0]} μs, confidence = {rate*100:.2f}%)"
+        f"Overhead in % of the iteration using preemptive iter vs native iter. (reactivity = {reacs[0]} μs, confidence = {rate*100:.2f}%)"
     )
     plt.show()
 
 
-def get_overhead(baselines: np.ndarray, preemptives: np.ndarray) -> np.ndarray:
-    overhead2 = np.zeros(len(baselines) * len(preemptives))
-    i = 0
-    for base in baselines:
-        for t in preemptives:
-            overhead2[i] = (t - base) / base * 100.0
-            i += 1
-    overhead2.sort()
-    return overhead2
+def get_overhead(base: np.ndarray, other: np.ndarray) -> np.ndarray:
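+    # pair every 'other' sample with every baseline sample: len(other) * len(base) values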
+    overhead = np.fromiter(
+        ((o - b) / b * 100.0 for o, b in itertools.product(other, base)), np.float64
+    )
+    # remove outliers
+    lower_bound, upper_bound = -20.0, 1000.0
+    filter_arr = (overhead > lower_bound) & (overhead < upper_bound)
+    overhead = overhead[filter_arr]
+    # sort for hist and cdf purposes
+    overhead.sort()
+    print("overhead samples after outlier filtering:", len(overhead))
+    return overhead
 
 
-def plot_likelyhood(path: str, n_iter: int, task_dur: int, reac: int):
+def plot_likelyhood(
+    path: str, n_iter: int, task_dur: int, reac: int, measurements: list[str]
+):
     print(f"{task_dur=} {n_iter=} {reac=}")
     df = pandas.read_csv(path)
-    df_filtered = df[
+    df = df[
         (df["N_Iter"] == n_iter)
         & (df["DurationTask"] == task_dur)
         & (df["Reactivity"] == reac)
     ]
-    assert len(df_filtered) > 0
-    print(len(df_filtered))
-    baselines = df_filtered["Baseline"].values
-    preemptives = df_filtered["Preemptive"].values
-    overhead2 = get_overhead(baselines, preemptives)
-    preemptives_fixed = df_filtered["PreemptiveFixed"].values
-    overhead2fixed = get_overhead(baselines, preemptives_fixed)
-    preemptives2 = df_filtered["Preemptive2"].values
-    overhead3 = get_overhead(baselines, preemptives2)
-    ecdf = np.linspace(0, 1, len(overhead2))
-    plt.plot(overhead2, ecdf, label="ecdf-preemptive-auto")
-    plt.plot(overhead2fixed, ecdf, label="ecdf-preemptive-fixed")
-    plt.plot(overhead3, ecdf, label="ecdf-preemptive-auto2")
+    assert len(df) > 0
+    print("repeat =", len(df))
+    assert "Baseline" in df.columns
+    base = df["Baseline"].values
+    overheads = {
+        m: get_overhead(base, df[m].values) for m in measurements if m != "Baseline"
+    }
+    for label, overhead in overheads.items():
+        plt.plot(overhead, np.linspace(0, 1, len(overhead)), label=label)
     plt.xlabel("x in %")
-    plt.xlim([-5, 50])
     plt.ylabel("P(X < x)")
     plt.legend()
     plt.title("Probability than the real overhead is below x")
     for rate in (0.85, 0.90, 0.95, 0.99):
-        x_rate = overhead2[bisect_left(ecdf, rate)]
-        x_rate_fix = overhead2fixed[bisect_left(ecdf, rate)]
-        print(
-            f"P(Overhead < x) < {100*rate:.0f}% for x={x_rate:.2f}% (auto) x={x_rate_fix:.2f}% (fixed)"
-        )
+        print(f"Probability than overhead is below x with {100*rate:.0f}% confidence :")
+        for label, sorted_data in overheads.items():
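+            # sorted ascending in get_overhead, so this indexes the empirical rate-quantile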
+            idx = int(rate * len(sorted_data))
+            x_rate = sorted_data[idx]
+            print(f"\t{label} x={x_rate:.2f}%")
     plt.show()
 
 
@@ -168,19 +181,55 @@ def main():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
+    parser.add_argument(
+        "--measurements",
+        nargs="+",
+        help="List of Measurements to consider.",
+        type=str,
+        default=DEFAULT_MEASUREMENTS,
+    )
     subparsers = parser.add_subparsers(dest="subparser_name")
     parser_collect = subparsers.add_parser(
         "collect", formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
     parser_collect.add_argument("output_csv", help="File to generate.")
+    parser_collect.add_argument(
+        "--profile",
+        default="release",
+        help="Profile to use with executable.",
+        choices=("release", "release-with-debug"),
+    )
     parser_collect.add_argument(
         "--repeat",
         type=int,
         help="Number of times to repeat each run.",
-        default=N_REPEAT,
+        default=DEFAULT_REPEAT,
+    )
+    parser_collect.add_argument(
+        "--n-iters",
+        nargs="+",
+        help="List of n-iter to benchmarks.",
+        type=int,
+        default=DEFAULT_PARAMETERS["N_Iter"],
+    )
+    parser_collect.add_argument(
+        "--task-durs",
+        nargs="+",
+        help="List of DurationTask to benchmarks.",
+        type=int,
+        default=DEFAULT_PARAMETERS["DurationTask"],
+    )
+    parser_collect.add_argument(
+        "--reacs",
+        nargs="+",
+        help="List of Reactivity to benchmarks.",
+        type=int,
+        default=DEFAULT_PARAMETERS["Reactivity"],
     )
 
-    parser_plot = subparsers.add_parser("plot")
+    parser_plot = subparsers.add_parser(
+        "plot", formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
     parser_plot.add_argument("input_csv", help="File to use for plot.")
     parser_plot.add_argument(
         "--confidence-rate",
@@ -189,35 +238,46 @@ def main():
         help="Confidence rate for the value showed on the heatmap. It displays the value x such that P(Overhead < x) = rate.",
     )
 
-    parser_likelyhood = subparsers.add_parser("likelyhood")
+    parser_likelyhood = subparsers.add_parser(
+        "likelyhood", formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
     parser_likelyhood.add_argument("input_csv", help="File to use for plot.")
     parser_likelyhood.add_argument(
         "--n-iter",
         help="Value of N_Iter to fix.",
         type=int,
-        default=PARAMETERS["N_Iter"][0],
+        default=DEFAULT_PARAMETERS["N_Iter"][0],
     )
     parser_likelyhood.add_argument(
         "--task-dur",
         help="Value of DurationTask to fix.",
         type=int,
-        default=PARAMETERS["DurationTask"][0],
+        default=DEFAULT_PARAMETERS["DurationTask"][0],
     )
     parser_likelyhood.add_argument(
         "--reac",
         help="Value of Reactivity to fix.",
         type=int,
-        default=PARAMETERS["Reactivity"][0],
+        default=DEFAULT_PARAMETERS["Reactivity"][0],
     )
     args = parser.parse_args()
 
     match args.subparser_name:
         case "collect":
-            collect_heatmap(args.output_csv, args.repeat)
+            params = {
+                "Reactivity": args.reacs,
+                "DurationTask": args.task_durs,
+                "N_Iter": args.n_iters,
+            }
+            CollectorCSV(
+                args.output_csv, args.repeat, params, args.measurements, args.profile
+            ).collect()
         case "plot":
             plot_heatmap(args.input_csv, args.confidence_rate)
         case "likelyhood":
-            plot_likelyhood(args.input_csv, args.n_iter, args.task_dur, args.reac)
+            plot_likelyhood(
+                args.input_csv, args.n_iter, args.task_dur, args.reac, args.measurements
+            )
         case other:
             print(f"Unknown command: {other}")
 
diff --git a/src/adapt.rs b/src/adapt.rs
index 0fd1e8597619afd31382b89598587c191375679c..a169790323cfd69a71bc306b2c857ddc7452f0d3 100644
--- a/src/adapt.rs
+++ b/src/adapt.rs
@@ -17,6 +17,9 @@ impl Divisible for std::ops::Range<u32> {
     }
 }
 
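+// async fn in a public trait leaves the returned future unnameable to callers
+// (e.g. no way to require Send), which is why rustc lints on it; allowed here.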
+#[allow(async_fn_in_trait)]
 pub trait AsyncIterator: Iterator + Divisible {
     async fn async_for_each<F: FnMut(Self::Item)>(self, mut f: F, reactivity: Duration) {
         self.async_fold((), move |(), item| f(item), reactivity)