diff --git a/README.md b/README.md
index b837755169e53df591707a6115b42b7b2e8f5ca7..a2236c2f9c571fd8c77141e8dc04c4602c23819f 100644
--- a/README.md
+++ b/README.md
@@ -26,11 +26,27 @@ The arguments are :
 `yield_now().await` inserted by our iterator.
 
 Typical overhead values is between 0 and 2% at max for large number of
-iteration. The bad overhead is about 40% and happens when using few iterations
+iteration. The worst-case overhead is about 30% and happens with few iterations
 (<50) with short duration tasks (< 10 μs).
 
 ## TODO :
 
+Reduce the overhead in the worst cases; some ideas:
+
+- Increase the starting block size of the exponential growth, e.g. start at
+  8, 16, or 32 iterations instead of 1. This reduces the number of calls to
+  `embassy_time::Instant::elapsed`, reduces the number of calls to `fold` and
+  `next`, and does less branching in the while loop. This is easy, but it can
+  also hurt reactivity if the real block size that should be found is smaller
+  than this starting size.
+
+- Change the implementation completely and use a type trick such as the one in
+  rayon. This could reduce the overhead of the calls to `fold` and `next`. Even
+  without any calls to `elapsed`, the overhead is still approximately 10% in
+  the worst cases, and this approach could help decrease it.
+
+More functions in the API:
+
 - `try_fold, try_for_each` (problem: generic `Try` trait is unstable)
 - `all, any, collect, count, lt, ge, inspect, last, max, min`
 - `all, any` ?
diff --git a/scripts/heatmap.py b/scripts/heatmap.py
index b063e47b78c4ad2168bc6664dbb77eda77f28a1d..cdefe7996097bef61e48ad29648e230346d1d7aa 100755
--- a/scripts/heatmap.py
+++ b/scripts/heatmap.py
@@ -3,6 +3,7 @@
 import argparse
 import subprocess
 import itertools
+from collections.abc import Callable
 from bisect import bisect_left
 import os
 import re
@@ -31,22 +32,6 @@ REGEX_BASELINE = re.compile(r"Baseline: (\d+)")
 REGEX_PREEMPT = re.compile(r"Preemptive: (\d+)")
 REGEX_PREEMPT2 = re.compile(r"PreemptiveFixed: (\d+)")
 
-CONFIDENCE_INTERVAL = 0.95
-
-
-def percent_up(series: pandas.Series) -> np.float64:
-    # 95 + 5/2 = 97.5 we left 2.5 to the right
-    return series.quantile(CONFIDENCE_INTERVAL + (1 - CONFIDENCE_INTERVAL) / 2)
-
-
-def percent_low(series: pandas.Series) -> np.float64:
-    # 5 - 5/2 = 2.5 we left 2.5 to the left
-    return series.quantile(1 - CONFIDENCE_INTERVAL - (1 - CONFIDENCE_INTERVAL) / 2)
-
-
-STATS = ["min", "max", "median", "mean", "std", percent_low, percent_up]
-GATHER_STATS = {"Overhead": STATS}
-
 
 def collect_heatmap(out_csv: str, repeat: int):
     if os.path.exists(out_csv):
@@ -88,37 +73,25 @@ def collect_heatmap(out_csv: str, repeat: int):
             print(f"{reac=} {dur=} {niter=} done")
 
 
-def plot_heatmap(path: str):
+def plot_heatmap(path: str, rate: float):
     df = pandas.read_csv(path)
-    df["Overhead"] = (df["Preemptive"] - df["Baseline"]) / df["Baseline"] * 100.0
-    print(df)
-    df = df.groupby(list(PARAMETERS)).agg(GATHER_STATS).reset_index()
     print(df)
 
     fig, ax = plt.subplots()
-    ax.set_xticks(
-        np.arange(len(PARAMETERS["DurationTask"])),
-        [str(d) for d in PARAMETERS["DurationTask"]],
-    )
-    ax.set_yticks(
-        np.arange(len(PARAMETERS["N_Iter"])),
-        [str(d) for d in PARAMETERS["N_Iter"]],
-    )
     data = np.zeros((len(PARAMETERS["N_Iter"]), len(PARAMETERS["DurationTask"])))
     for i, niter in enumerate(PARAMETERS["N_Iter"]):
        for j, dur in enumerate(PARAMETERS["DurationTask"]):
-            condition = (df["N_Iter"] == niter) & (df["DurationTask"] == dur)
-            overhead = df[condition]["Overhead"]
-            print(overhead)
-            lb = np.float64(overhead["percent_low"].iloc[0])
-            ub = np.float64(overhead["percent_up"].iloc[0])
-            mid = (lb + ub) / 2
-            err = ub - mid
+            df_filtered = df[(df["N_Iter"] == niter) & (df["DurationTask"] == dur)]
+            baselines = df_filtered["Baseline"].values
+            preemptives = df_filtered["Preemptive"].values
+            overhead = get_overhead(baselines, preemptives)
+            ecdf = np.linspace(0, 1, len(overhead))
+            mid = overhead[bisect_left(ecdf, rate)]
             data[i, j] = mid
             ax.text(
                 j,
                 i,
-                f"{data[i,j]:.2f}±{err:.2f}%",
+                f"{data[i,j]:.2f}%",
                 ha="center",
                 va="center",
                 color="w",
@@ -126,10 +99,16 @@ def plot_heatmap(path: str):
     ax.imshow(data)
 
     fig.tight_layout()
+
+    def settick(set_func: Callable, key: str):
+        set_func(list(range(len(PARAMETERS[key]))), list(map(str, PARAMETERS[key])))
+
+    settick(ax.set_xticks, "DurationTask")
+    settick(ax.set_yticks, "N_Iter")
     ax.set_xlabel("Duration of each iteration in approx e-7 secs.")
     ax.set_ylabel("Number of iterations.")
     ax.set_title(
-        f"Overhead in % of the iteration using preemptive iter vs native iter. (reactivity = {PARAMETERS['Reactivity'][0]} μs)"
+        f"Overhead in % of the iteration using preemptive iter vs native iter. (reactivity = {PARAMETERS['Reactivity'][0]} μs, confidence = {rate*100:.2f}%)"
     )
     plt.show()
 
@@ -195,6 +174,12 @@ def main():
 
     parser_plot = subparsers.add_parser("plot")
     parser_plot.add_argument("input_csv", help="File to use for plot.")
+    parser_plot.add_argument(
+        "--confidence-rate",
+        type=float,
+        default=0.95,
+        help="Confidence rate for the value shown on the heatmap. It displays the value x such that P(Overhead < x) = rate.",
+    )
 
     parser_likelyhood = subparsers.add_parser("likelyhood")
     parser_likelyhood.add_argument("input_csv", help="File to use for plot.")
@@ -222,7 +207,7 @@ def main():
         case "collect":
             collect_heatmap(args.output_csv, args.repeat)
         case "plot":
-            plot_heatmap(args.input_csv)
+            plot_heatmap(args.input_csv, args.confidence_rate)
         case "likelyhood":
             plot_likelyhood(args.input_csv, args.n_iter, args.task_dur, args.reac)
         case other:
diff --git a/src/lib.rs b/src/lib.rs
index 17a93c7a4a4c3313f3a52a4cc27612ff91f6fc20..d6e7493aac942372de34f445ea10a07f1232423e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -39,7 +39,7 @@ impl<T, I: Iterator<Item = T>> PreemptiveIterator for I {
     ) -> B {
         let mut acc = init;
         let react_us = reactivity.as_micros();
-        let mut n_iters = 1;
+        let mut n_iters = 8;
         loop {
             let start = Instant::now();
             let mut total_block = 0;
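
A note on the `n_iters = 8` change in `src/lib.rs`: it implements the first TODO item from the README hunk, starting the exponential block growth at 8 iterations so the earliest, most frequent timing checks are skipped. The snippet below is only a back-of-the-envelope model of that doubling phase, not the actual loop in `src/lib.rs` (whose body is not part of this diff); `checks_until_block` is a hypothetical helper used just to count doubling rounds.

```python
def checks_until_block(target_block: int, start: int) -> int:
    # Count doubling rounds (one timing check each, in this simplified model)
    # needed to grow a block from `start` iterations up to `target_block`.
    checks, block = 0, start
    while block < target_block:
        block *= 2
        checks += 1
    return checks


# Starting at 8 skips the 1 -> 2 -> 4 -> 8 rounds entirely, at the cost of
# never reacting at a granularity finer than 8 iterations.
print(checks_until_block(256, start=1))  # 8
print(checks_until_block(256, start=8))  # 5
```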
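
For context on the `plot_heatmap` change in `scripts/heatmap.py`: each heatmap cell now shows an empirical quantile of the per-run overhead samples instead of a mean with a ± half-interval. The sketch below reproduces that lookup in isolation; `get_overhead` is not defined in this diff, so its body here is an assumption (per-sample overhead in percent, sorted ascending, which is what the `ecdf`/`bisect_left` indexing relies on), and `overhead_at_confidence` is a hypothetical wrapper name.

```python
import numpy as np
from bisect import bisect_left


def get_overhead(baselines: np.ndarray, preemptives: np.ndarray) -> np.ndarray:
    # Assumed stand-in for the helper used by plot_heatmap: per-sample
    # overhead in percent, sorted ascending so it can be indexed via an ECDF.
    return np.sort((preemptives - baselines) / baselines * 100.0)


def overhead_at_confidence(baselines, preemptives, rate: float = 0.95) -> float:
    # Hypothetical wrapper showing the same lookup plot_heatmap now performs.
    overhead = get_overhead(np.asarray(baselines, dtype=float),
                            np.asarray(preemptives, dtype=float))
    # ecdf[k] approximates P(Overhead <= overhead[k]) for the sorted samples.
    ecdf = np.linspace(0, 1, len(overhead))
    # Smallest sample whose ECDF value reaches the requested rate, clamped so
    # rate = 1.0 does not index past the end of the array.
    idx = min(bisect_left(ecdf, rate), len(overhead) - 1)
    return float(overhead[idx])


# Example: the value below which roughly 95% of the measured overheads fall.
print(overhead_at_confidence([100, 100, 100, 100], [101, 102, 105, 140]))  # 40.0
```

With sorted samples, `bisect_left(ecdf, rate)` picks the smallest sample whose empirical CDF value reaches `rate`, so the number printed in each cell reads as "this fraction of the runs had an overhead below this value", matching the new `--confidence-rate` help text.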