diff --git a/examples/quickrun/config.toml b/examples/quickrun/config.toml
index eaa59344da57ae02f25e846906bad61cf5765bf4..35c23aebda424df9cd53372d6dfc87dde084f21b 100644
--- a/examples/quickrun/config.toml
+++ b/examples/quickrun/config.toml
@@ -35,5 +35,5 @@ rounds = 10 # Number of overall training rounds
 
 
 [experiment] # What to report during the experiment and where to report it
-metrics=[["multi-classif",{labels = [0,1,2,3,4,5,6,7,8,9]}]]
+metrics=[["multi-classif",{labels = [0,1,2,3,4,5,6,7,8,9]}]] # Accuracy metric
 
diff --git a/examples/quickrun/custom/config_custom.toml b/examples/quickrun/custom/config_custom.toml
new file mode 100644
index 0000000000000000000000000000000000000000..73c0c9e5e1e5fdf551985019eb67962907e57819
--- /dev/null
+++ b/examples/quickrun/custom/config_custom.toml
@@ -0,0 +1,81 @@
+# This is a TOML file demonstrating the extent of customization
+# available for experiments through TOML. It contains commented mock
+# fields and can be used as a template for other experiments. It will
+# not run as is, as it references some non-existent files for
+# demonstration purposes.
+
+
+[network] # Network configuration used by both client and server
+protocol = "websockets" # Protocol used, to keep things simple use websocket
+host = "127.0.0.1" # Address used, works as is on most set ups
+port = 8765 # Port used, works as is on most set ups
+
+[model] # Information on where to find the model file
+# The location to a model file, if not following expected structure
+model_file = "examples/quickrun/custom/model_custom.py"
+# The name of your model file, if different from "MyModel"
+model_name = "MyCustomModel" 
+
+[data] # How to split your data
+
+# If you want to split your data 
+# Where to save your split data, if not in the expected "result" folder
+data_folder = "examples/quickrun/custom/data_custom"
+n_shards = 3 # Number of simulated clients
+# Where to find the original data
+data_file = "examples/quickrun/data_unsplit/train_data.npy"
+# Where to find the original data labels
+label_file = "examples/quickrun/data_unsplit/train_target.npy"
+scheme = "iid" # How to split your data between simulated clients
+perc_train = 0.8 # For each client, the share of data used for training
+seed = 22
+
+# If you have already split data, not using expected names 
+# The custom names of your clients
+client_names = ["client_0","client_1","client_2"]
+    [dataset_names] # The names of the train and validation datasets
+    train_data = "train_data" 
+    train_target = "train_target"
+    valid_data = "valid_data"
+    valid_target = "valid_target"
+
+[optim] # Optimizers options for both client and server
+aggregator = "averaging" # Server aggregation strategy
+server_opt = 1.0 # The server learning rate
+
+    [optim.client_opt] # Client optimization strategy
+    lrate = 0.001 # Client learning rate
+    # List of optimizer modules used
+    modules = [["momentum", {"beta" = 0.9}]]
+    # List of regularizer modules
+    regularizers = [["lasso", {alpha = 0.1}]]
+
+[run] # Training process option for both client and server
+rounds = 10 # Number of overall training rounds
+
+    [run.register] # Client registration options
+    min_clients = 1 # Minimum of clients that need to connect
+    max_clients = 6 # The maximum number of clients that can connect
+    timeout = 5 # How long to wait for clients, in seconds
+
+    [run.training] # Client training procedure
+    n_epoch = 1 # Number of local epochs
+    batch_size = 48 # Training batch size
+    drop_remainder = false # Whether to drop the last training examples
+
+    [run.evaluate]
+    batch_size = 128 # Evaluation batch size
+
+
+[experiment] # What to report during the experiment and where to report it
+metrics=[["multi-classif",{labels = [0,1,2,3,4,5,6,7,8,9]}]] # Accuracy metric
+checkpoint = "examples/quickrun/result_custom" # Custom location to results
+
+
+
+
+
+
+
+
+
diff --git a/examples/quickrun/custom/model_custom.py b/examples/quickrun/custom/model_custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aaef2c33527b260fe04187cb0a65a7a8f662624
--- /dev/null
+++ b/examples/quickrun/custom/model_custom.py
@@ -0,0 +1,24 @@
+"""Wrapping a TensorFlow model"""
+
+import tensorflow as tf
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from declearn.model.tensorflow import TensorflowModel
+from declearn.model.torch import TorchModel
+
+
+stack = [
+    tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
+    tf.keras.layers.Conv2D(32, 3, 1, activation="relu"),
+    tf.keras.layers.Conv2D(64, 3, 1, activation="relu"),
+    tf.keras.layers.MaxPool2D(2),
+    tf.keras.layers.Dropout(0.25),
+    tf.keras.layers.Flatten(),
+    tf.keras.layers.Dense(128, activation="relu"),
+    tf.keras.layers.Dropout(0.5),
+    tf.keras.layers.Dense(10, activation="softmax"),
+]
+model = tf.keras.models.Sequential(stack)
+MyCustomModel = TensorflowModel(model, loss="sparse_categorical_crossentropy")
diff --git a/examples/quickrun/readme.md b/examples/quickrun/readme.md
index e60fb6ea881c12a4276fbacae91dda2bf75994fa..2eb05b6444cc95f981a1956b45d9d7e7664eb16a 100644
--- a/examples/quickrun/readme.md
+++ b/examples/quickrun/readme.md
@@ -69,11 +69,14 @@ in `./custom/config_custom.toml`
 ## The data
 
 Your data, in a standard tabular format. This data can either require
-splitting or be already split by client
+splitting or be already split by client. If your data requires splitting,
+we provide an experimental data splitter, currently limited in
+scope.
 
 Requires splitting:
 
 * You have a single dataset and want to use provided utils to split it
+between simulated clients
 * In which case you need to mention your data source in the TOML file,
 as well as details on how to split your data. See
 `./custom/config_custom.toml` for details.
@@ -93,7 +96,7 @@ add details in the TOML file on where to find this data. See
 The quickrun mode expects a `config` path as an argument. This can be the path to :
 
 * A folder, expected to be structured a certain way
-* A TOML file, where the location of every other object is mentionned
+* A TOML file, where the location of other objects is mentioned
 
 In both cases, the default is to check the folder provided, or the TOML
 parent folder, is structured as follows:
@@ -102,14 +105,17 @@ parent folder, is structured as follows:
     folder/
     │    config.toml - the config file
     │    model.py - the model
-    └─── data*/
-        └─── client*/
+    └─── data*/ - a folder starting with 'data' containing split data
+        └─── client*/ - one folder per client, each containing 4 files
         │      train_data.* - training data
         │      train_target.* - training labels
         │      valid_data.* - validation data
         │      valid_target.* - validation labels
         └─── client*/
         │    ...
+    └─── result/
+        └─── client*/
+        ...
 ```
 
 Any changes to this structure should be referenced in the TOML file, as