diff --git a/declearn/fairness/__init__.py b/declearn/fairness/__init__.py
index 6bfd887023ed43c71ec14b01ab3b61fc8c635217..929b2e82f9a92e87f7ce0b0dd46d2d64f68a91c9 100644
--- a/declearn/fairness/__init__.py
+++ b/declearn/fairness/__init__.py
@@ -17,6 +17,41 @@
 
 """Processes and components for fairness-aware federated learning.
 
+Introduction
+------------
+
+This module provides a general API and some specific algorithms
+to measure and enforce group fairness as part of a federated
+learning process in DecLearn.
+
+Group fairness refers to a setting where a classifier is trained
+over data that can be split into various subsets based on one or
+more categorical sensitive attributes, usually together with the
+target label. In such a setting, the model's fairness is defined
+and evaluated by comparing its accuracy across these subgroups,
+using one of the definitions proposed in the literature.
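+
+As a purely illustrative sketch (hypothetical arrays and an ad hoc
+"accuracy parity" summary, not part of the DecLearn API), group-wise
+accuracies and a scalar fairness gap may be computed as follows:
+
+```python
+import numpy as np
+
+# Hypothetical predictions, labels and binary sensitive attribute.
+y_true = np.array([0, 1, 1, 0, 1, 0, 1, 1])
+y_pred = np.array([0, 1, 0, 0, 1, 1, 1, 0])
+s_attr = np.array([0, 0, 0, 0, 1, 1, 1, 1])
+
+# Sensitive groups cross the sensitive attribute with the target label.
+accuracies = {}
+for label in (0, 1):
+    for attr in (0, 1):
+        mask = (y_true == label) & (s_attr == attr)
+        accuracies[(label, attr)] = float((y_pred[mask] == y_true[mask]).mean())
+
+# Summarize (un)fairness as the largest gap across group-wise accuracies.
+gap = max(accuracies.values()) - min(accuracies.values())
+```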
+
+The algorithms and shared API implemented in this module consider
+that the fairness being measured (and optimized) is to be computed
+over the union of all training datasets held by clients. The API is
+designed to be compatible with any number of sensitive groups, with
+regimes where individual clients do not necessarily hold samples
+from each and every group, and with all group fairness definitions
+that can be expressed in the form introduced in [1]. However, some
+restrictions may be enforced by concrete algorithms, in alignment
+with those set by their original authors.
+
+Currently, concrete algorithms include:
+
+- Fed-FairGrad, adapted from [1]
+- Fed-FairBatch, adapted from [2], and the FedFB variant based on [3]
+- FairFed, based on [4]
+
+In addition, a "monitor-only" algorithm is provided, which merely uses
+the shared API to measure client-wise and global fairness throughout
+training, without altering the training process.
+
 API-defining and core submodules
 --------------------------------
 
@@ -36,6 +71,26 @@ Algorithms submodules
     Fed-FairGrad algorithm controllers and utils.
 * [monitor][declearn.fairness.monitor]:
     Fairness-monitoring controllers, that leave training unaltered.
+
+References
+----------
+
+- [1]
+    Maheshwari & Perrot (2023).
+    FairGrad: Fairness Aware Gradient Descent.
+    https://openreview.net/forum?id=0f8tU3QwWD
+- [2]
+    Roh et al. (2020).
+    FairBatch: Batch Selection for Model Fairness.
+    https://arxiv.org/abs/2012.01696
+- [3]
+    Zeng et al. (2022).
+    Improving Fairness via Federated Learning.
+    https://arxiv.org/abs/2110.15545
+- [4]
+    Ezzeldin et al. (2021).
+    FairFed: Enabling Group Fairness in Federated Learning.
+    https://arxiv.org/abs/2110.00857
 """
 
 from . import api
diff --git a/declearn/fairness/api/_client.py b/declearn/fairness/api/_client.py
index 3d0870985245d30240ad4e08b00ba911a21fdaa7..f9ce30bb5dd9dbe8c614ede974909165ade280a0 100644
--- a/declearn/fairness/api/_client.py
+++ b/declearn/fairness/api/_client.py
@@ -55,7 +55,59 @@ __all__ = [
 
 @create_types_registry(name="FairnessControllerClient")
 class FairnessControllerClient(metaclass=abc.ABCMeta):
-    """Abstract base class for client-side fairness controllers."""
+    """Abstract base class for client-side fairness controllers.
+
+    Usage
+    -----
+    A `FairnessControllerClient` (subclass) instance has two main
+    routines that are to be called as part of a federated learning
+    process, in addition to a static method from the base API class:
+
+    - `from_setup_query`:
+        This is a static method that can be called generically from
+        the base `FairnessControllerClient` type to instantiate a
+        controller from a server-emitted `FairnessSetupQuery`.
+    - `setup_fairness`:
+        This routine is to be called only once, after instantiating
+        from a `FairnessSetupQuery`. It triggers the following process:
+            - Run a basic routine to exchange sensitive group definitions
+              and associated (encrypted) sample counts.
+            - Perform any additional algorithm-specific setup actions.
+    - `run_fairness_round`:
+        This routine is to be called once per round, before the next
+        training round occurs, upon receiving a `FairnessQuery` from
+        the server. It triggers the following process:
+            - Run a basic routine to compute fairness-related metrics
+              and send (some of) their (encrypted) values to the server.
+            - Perform any additional algorithm-specific round actions.
+
+    Inheritance
+    -----------
+    Algorithm-specific subclasses should define the following abstract
+    attribute and methods:
+
+    - `algorithm`:
+        Abstract string class attribute. Name under which this controller
+        and its server-side counterpart classes are registered.
+    - `finalize_fairness_setup`:
+        Method implementing any algorithm-specific setup actions.
+    - `finalize_fairness_round`:
+        Method implementing any algorithm-specific round actions.
+
+    Additionally, they may overload or override the following method:
+
+    - `setup_fairness_metrics`:
+        Method that defines metrics being computed as part of fairness
+        rounds. By default, group-wise accuracy values are computed and
+        shared with the server, and the local fairness is computed from
+        them (but not sent to the server).
+
+    By default, subclasses are type-registered under their `algorithm`
+    name and "FairnessControllerClient" group upon declaration. This can
+    be prevented by passing `register=False` to the inheritance parameters
+    (e.g. `class Cls(FairnessControllerClient, register=False)`).
+    See `declearn.utils.register_type` for details on type registration.
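+
+    As a purely illustrative sketch, a subclass declaration may look as
+    follows (the algorithm name is hypothetical, and the abstract methods,
+    whose signatures are defined by the base API, are left out):
+
+    ```python
+    from declearn.fairness.api import FairnessControllerClient
+
+    class MyFairnessClient(
+        FairnessControllerClient,
+        register=False,  # opt out of automatic type registration
+    ):
+        # Name shared with the server-side counterpart controller class.
+        algorithm = "my-algorithm"
+
+        # `finalize_fairness_setup` and `finalize_fairness_round` would
+        # need to be implemented here, following the base API signatures.
+        ...
+    ```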
+    """
 
     algorithm: ClassVar[str]
     """Name of the fairness-enforcing algorithm.
diff --git a/declearn/fairness/api/_dataset.py b/declearn/fairness/api/_dataset.py
index 205be89e76ed7cb9d3e26f4c5c7d36eaff9a46f4..4e4af5386cbc050a9afec507f6b30d886fe04573 100644
--- a/declearn/fairness/api/_dataset.py
+++ b/declearn/fairness/api/_dataset.py
@@ -30,7 +30,14 @@ __all__ = [
 
 
 class FairnessDataset(Dataset, metaclass=ABCMeta):
-    """Abstract base class for Fairness-aware Dataset interfaces."""
+    """Abstract base class for Fairness-aware Dataset interfaces.
+
+    This `declearn.dataset.Dataset` abstract subclass adds API methods
+    related to group fairness to the base dataset API. These revolve
+    around accessing sensitive group definitions, group-wise sample
+    counts and group-wise subsets of the dataset. They further add the
+    possibility to modify sample weights based on the sensitive group
+    to which samples belong.
+    """
 
     @abstractmethod
     def get_sensitive_group_definitions(
diff --git a/declearn/fairness/api/_server.py b/declearn/fairness/api/_server.py
index 8ef8a54c9ba0d73d5a988d7727990096c891d83d..c193b6b72ae7ac4831f2cc7d399abab23f5fd840 100644
--- a/declearn/fairness/api/_server.py
+++ b/declearn/fairness/api/_server.py
@@ -48,7 +48,50 @@ __all__ = [
 
 @create_types_registry(name="FairnessControllerServer")
 class FairnessControllerServer(metaclass=abc.ABCMeta):
-    """Abstract base class for server-side fairness controllers."""
+    """Abstract base class for server-side fairness controllers.
+
+    Usage
+    -----
+    A `FairnessControllerServer` (subclass) instance has two main
+    routines that are to be called as part of a federated learning
+    process:
+
+    - `setup_fairness`:
+        This routine is to be called only once, during the setup of the
+        overall federated learning task. It triggers the following process:
+            - Send a `FairnessSetupQuery` to clients so that they
+              instantiate a counterpart `FairnessControllerClient`.
+            - Run a basic routine to exchange sensitive group definitions
+              and (secure-)aggregate associated sample counts.
+            - Perform any additional algorithm-specific setup actions.
+
+    - `run_fairness_round`:
+        This routine is to be called once per round, before the next
+        training round occurs. A `FairnessQuery` should be sent to
+        clients prior to calling it. It triggers the following process:
+            - Run a basic routine to receive and (secure-)aggregate
+              metrics computed by clients that relate to fairness.
+            - Perform any additional algorithm-specific round actions.
+
+    Inheritance
+    -----------
+    Algorithm-specific subclasses should define the following abstract
+    attribute and methods:
+
+    - `algorithm`:
+        Abstract string class attribute. Name under which this controller
+        and its client-side counterpart classes are registered.
+    - `finalize_fairness_setup`:
+        Method implementing any algorithm-specific setup actions.
+    - `finalize_fairness_round`:
+        Method implementing any algorithm-specific round actions.
+
+    By default, subclasses are type-registered under their `algorithm`
+    name and "FairnessControllerServer" group upon declaration. This can
+    be prevented by passing `register=False` to the inheritance parameters
+    (e.g. `class Cls(FairnessControllerServer, register=False)`).
+    See `declearn.utils.register_type` for details on type registration.
+    """
 
     algorithm: ClassVar[str]
     """Name of the fairness-enforcing algorithm.
diff --git a/declearn/fairness/core/_inmemory.py b/declearn/fairness/core/_inmemory.py
index 5f6da6ed60fdfeebec3185865e0ca4455d73e75d..d4855e676d4559453a0542bd639a924e92847e79 100644
--- a/declearn/fairness/core/_inmemory.py
+++ b/declearn/fairness/core/_inmemory.py
@@ -36,7 +36,18 @@ __all__ = [
 
 
 class FairnessInMemoryDataset(FairnessDataset, InMemoryDataset):
-    """Fairness-aware InMemoryDataset subclass."""
+    """Fairness-aware InMemoryDataset subclass.
+
+    This class extends `declearn.dataset.InMemoryDataset` to
+    enable its use in fairness-aware federated learning. Two
+    parameters are added to its `__init__`: `s_attr` and
+    `sensitive_target`, which are used to define sensitive
+    groups over the held dataset. Additionally, API methods
+    from `declearn.fairness.api.FairnessDataset` are defined,
+    enabling access to sensitive groups' metadata and samples,
+    as well as changing sample weights based on the group to
+    which samples belong.
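+
+    As a purely illustrative sketch (column names and values are made up,
+    and the import path is an assumption), such a dataset may be set up
+    from a pandas DataFrame as follows:
+
+    ```python
+    import pandas as pd
+
+    from declearn.fairness.core import FairnessInMemoryDataset
+
+    df = pd.DataFrame({
+        "feat": [0.1, 0.2, 0.3, 0.4],
+        "sex": [0, 1, 0, 1],
+        "label": [0, 0, 1, 1],
+    })
+    dataset = FairnessInMemoryDataset(
+        data=df,
+        target="label",
+        s_attr=["sex"],         # column(s) defining sensitive attributes
+        sensitive_target=True,  # cross the target label into group definitions
+    )
+    ```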
+    """
 
     def __init__(
         self,
diff --git a/declearn/fairness/fairbatch/__init__.py b/declearn/fairness/fairbatch/__init__.py
index 56dbad116edfe73753e6e843a86cb52a261cef12..31ba338623edf84d85e945e26bf69e5ee17183f2 100644
--- a/declearn/fairness/fairbatch/__init__.py
+++ b/declearn/fairness/fairbatch/__init__.py
@@ -19,29 +19,36 @@
 
 Introduction
 ------------
-This module provides with a double-fold implementation of an adaptation
-of the FairBatch [1] algorithm for federated learning. On the one hand,
-the FedFB [2] algorithm is implemented, that both adapts FairBatch in a
-straightforward manner and introduces changes in formulas compared with
-the initial paper. On the other hand, the a custom algorithm deemed as
-Fed-FairBatch is implemented, that is similar in intent to FedFB but
-sticks to the raw FairBatch formulas.
-
-FairBatch is a group-fairness-enforcing algorithm that relies on a
-specific form of loss reweighting mediated by a specific batching
+FairBatch [1] is a group-fairness-enforcing algorithm that relies
+on a specific form of loss reweighting mediated via the batching
 of samples for SGD steps. Namely, in FairBatch, batches are drawn
 by concatenating group-wise sub-batches, the size of which is the
 byproduct of the desired total batch size and group-wise sampling
 probabilities, with the latter being updated throughout training
-based on the current model's fairness.
+based on the measured fairness of the current model.
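+
+As a purely illustrative sketch (hypothetical values, distinct from
+the actual DecLearn implementation), sampling probabilities translate
+into group-wise sub-batch sizes roughly as follows:
+
+```python
+batch_size = 32
+# Hypothetical sampling probabilities for (label, attribute) groups.
+sampling_probas = {(0, 0): 0.25, (0, 1): 0.30, (1, 0): 0.20, (1, 1): 0.25}
+# Each drawn batch concatenates group-wise sub-batches with these sizes.
+sub_batch_sizes = {
+    group: round(proba * batch_size)
+    for group, proba in sampling_probas.items()
+}
+# Sampling probabilities (hence sizes) are updated throughout training
+# based on the measured fairness of the current model.
+```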
+
+This module provides a two-fold adaptation of FairBatch to the
+federated learning setting. On the one hand, a straightforward
+adaptation using the law of total probability is proposed, which
+is not based on any published paper. On the other hand, the FedFB
+[2] algorithm is implemented, which adapts FairBatch in a similar
+way but further introduces changes in formulas compared with the
+base paper. Both variants are available via a single pair of
+classes, with a boolean switch to choose between them.
 
-Initially, FairBatch is designed for binary classification tasks
+Originally, FairBatch was designed for binary classification tasks
 on data that have a single binary sensitive attribute. Both our
 implementations currently stick to that setting, in spite of the
-FedFB authors using a formalism that arguably extend formulas to
+FedFB authors using a formalism that arguably extends formulas to
 more generic categorical sensitive attribute(s) - which is not
 tested in the paper.
 
+Finally, it is worth noting that the translation of the sampling
+probabilities into the data batching process is done in accordance
+with the reference implementation by the original FairBatch authors.
+More details may be found in the documentation of `FairbatchDataset`
+(a backend tool that end-users do not need to use directly).
+
 Controllers
 -----------
 * [FairbatchControllerClient]
@@ -70,6 +77,17 @@ Messages
 * [FairbatchOkay][declearn.fairness.fairbatch.FairbatchOkay]
 * [FairbatchSamplingProbas[
 [declearn.fairness.fairbatch.FairbatchSamplingProbas]
+
+References
+----------
+- [1]
+    Roh et al. (2020).
+    FairBatch: Batch Selection for Model Fairness.
+    https://arxiv.org/abs/2012.01696
+- [2]
+    Zeng et al. (2022).
+    Improving Fairness via Federated Learning.
+    https://arxiv.org/abs/2110.15545
 """
 
 from ._messages import (
diff --git a/declearn/fairness/fairbatch/_server.py b/declearn/fairness/fairbatch/_server.py
index 8f0592af47583f26234edd3a79b94d6e6d2a8bec..603d314ccfe742b6c84df0e1d25f85d714d03778 100644
--- a/declearn/fairness/fairbatch/_server.py
+++ b/declearn/fairness/fairbatch/_server.py
@@ -43,6 +43,24 @@ __all__ = [
 class FairbatchControllerServer(FairnessControllerServer):
     """Server-side controller to implement Fed-FairBatch or FedFB.
 
+    FairBatch [1] is a group-fairness-enforcing algorithm that relies
+    on a specific form of loss reweighting mediated via the batching
+    of samples for SGD steps. Namely, in FairBatch, batches are drawn
+    by concatenating group-wise sub-batches, the size of which is the
+    product of the desired total batch size and group-wise sampling
+    probabilities, with the latter being updated throughout training
+    based on the measured fairness of the current model.
+
+    This controller implements an adaptation of FairBatch for federated
+    learning, which is limited to the setting of the original paper, i.e.
+    a binary classification task on data that have a single binary
+    sensitive attribute.
+
+    The `fedfb` instantiation parameter controls whether formulas from
+    the original paper should be used for computing and updating group
+    sampling probabilities (the default), or be replaced with variants
+    introduced in the FedFB algorithm from paper [2].
+
     References
     ----------
     - [1]
diff --git a/declearn/fairness/fairfed/__init__.py b/declearn/fairness/fairfed/__init__.py
index d0fdc4e5d221c9af6512b7af8590b10599c3285f..0185d990bbe4af39e36a18fe885f8338b723aa63 100644
--- a/declearn/fairness/fairfed/__init__.py
+++ b/declearn/fairness/fairfed/__init__.py
@@ -19,11 +19,12 @@
 
 Introduction
 ------------
-This module provides with an implementation of FairFed, an algorithm
-introduced by Ezzeldin et al. (2021), that weights client-wise model
-updates' averaging based on differences between the global and local
-fairness of the (prior version of the) shared model, using somewhat
-ad hoc discrepancy metrics to summarize fairness as scalar values.
+This module provides an implementation of FairFed [1], an
+algorithm that aims at enforcing fairness in a federated learning
+setting by weighting the averaging of client-wise model updates
+based on differences between the global and local fairness of the
+(prior version of the) shared model, using somewhat ad hoc
+discrepancy metrics to summarize fairness as scalar values.
 
 This algorithm was originally designed for settings where a binary
 classifier is trained over data with a single binary sensitive
@@ -62,6 +63,13 @@ Messages
 * [FairfedFairness][declearn.fairness.fairfed.FairfedFairness]
 * [FairfedOkay][declearn.fairness.fairfed.FairfedOkay]
 * [SecaggFairfedDelta][declearn.fairness.fairfed.SecaggFairfedDelta]
+
+References
+----------
+- [1]
+    Ezzeldin et al. (2021).
+    FairFed: Enabling Group Fairness in Federated Learning.
+    https://arxiv.org/abs/2110.00857
 """
 
 from ._messages import (
diff --git a/declearn/fairness/fairfed/_server.py b/declearn/fairness/fairfed/_server.py
index a143901469d11a98ed9b415181c02fae47b0ff18..1d66a7e36d2acada37b3e96c51b546d7a4a0592a 100644
--- a/declearn/fairness/fairfed/_server.py
+++ b/declearn/fairness/fairfed/_server.py
@@ -49,7 +49,35 @@ __all__ = [
 
 
 class FairfedControllerServer(FairnessControllerServer):
-    """Server-side controller to implement FairFed."""
+    """Server-side controller to implement FairFed.
+
+    FairFed [1] is an algorithm that aims at enforcing fairness in
+    a federated learning setting by altering the aggregation rule
+    for client-wise model updates. It conducts a weighted averaging
+    of these updates that is based on discrepancy metrics between
+    global and client-wise fairness measures.
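+
+    As a purely schematic sketch (hypothetical values; the paper's actual
+    update rule differs, and is what this controller implements), such a
+    discrepancy-based weighting may be pictured as follows:
+
+    ```python
+    import numpy as np
+
+    global_fairness = 0.05                         # aggregated fairness value
+    local_fairness = np.array([0.02, 0.10, 0.06])  # client-wise fairness values
+    deltas = np.abs(local_fairness - global_fairness)    # discrepancy metrics
+    weights = np.exp(-deltas) / np.sum(np.exp(-deltas))  # schematic weighting
+    # Client-wise model updates would then be averaged using these weights.
+    ```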
+
+    This algorithm was originally designed for settings where a binary
+    classifier is trained over data with a single binary sensitive
+    attribute, with the authors showcasing their generic formulas over
+    a limited set of group fairness definitions. DecLearn expands it to
+    a broader case, enabling the use of arbitrary fairness definitions
+    over data that may have non-binary and/or many sensitive attributes.
+    A 'strict' mode, which is enabled by default and can be disabled at
+    instantiation, makes the implementation stick to the original paper.
+
+    It is worth noting that the authors of FairFed suggest combining it
+    with other mechanisms that aim at enforcing local model fairness; at
+    the moment, DecLearn does not provide such mechanisms, unless end-users
+    implement a custom `Model` subclass to that end.
+
+    References
+    ----------
+    - [1]
+        Ezzeldin et al. (2021).
+        FairFed: Enabling Group Fairness in Federated Learning.
+        https://arxiv.org/abs/2110.00857
+    """
 
     algorithm = "fairfed"
 
diff --git a/declearn/fairness/fairgrad/__init__.py b/declearn/fairness/fairgrad/__init__.py
index b1fafacc9fcb89160caf2879d4993d3cd1aec0d9..7967c1ad17e8fd6e993c03ab87cfe64fd17de63e 100644
--- a/declearn/fairness/fairgrad/__init__.py
+++ b/declearn/fairness/fairgrad/__init__.py
@@ -19,15 +19,15 @@
 
 Introduction
 ------------
-This module provides with an implementation of Fed-FairGrad, a work-
-in-progress algorithm that aims at adapting the FairGrad algorithm
-(introduced by Maheshwari and Perrot (2022)) to the federated setting.
+This module provides an implementation of Fed-FairGrad,
+a yet-to-be-published algorithm that adapts FairGrad [1]
+to the federated learning setting.
 
-FairGrad formulates an optimization problem that aims at maximizing a
-group-fairness function while minimizing the overall loss of a model.
-Its solving relies on introducing sensitive-group-wise weights, that
-are updated throughout the training based on estimates of the current
-model's fairness on the training data.
+FairGrad aims at minimizing the training loss of a model under
+group-fairness constraints, with an optional epsilon tolerance.
+It relies on reweighting the loss with group-wise weights that
+are updated throughout training based on estimates of the
+current fairness of the trained model.
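+
+As a purely illustrative sketch (hypothetical values, distinct from
+the actual DecLearn implementation), this reweighting boils down to:
+
+```python
+import numpy as np
+
+sample_losses = np.array([0.2, 0.9, 0.4, 0.6])  # per-sample losses
+sample_groups = np.array([0, 1, 0, 1])          # sensitive group indices
+group_weights = np.array([0.8, 1.3])            # updated from fairness estimates
+
+weighted_loss = np.mean(group_weights[sample_groups] * sample_losses)
+```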
 
 Fed-FairGrad formulates the same problem, and adjusts client-wise
 weights based on the repartition of group-wise data across clients.
@@ -60,6 +60,14 @@ Messages
 --------
 * [FairgradOkay][declearn.fairness.fairgrad.FairgradOkay]
 * [FairgradWeights][declearn.fairness.fairgrad.FairgradWeights]
+
+References
+----------
+- [1]
+    Maheshwari & Perrot (2023).
+    FairGrad: Fairness Aware Gradient Descent.
+    https://openreview.net/forum?id=0f8tU3QwWD
 """
 
 from ._messages import (
diff --git a/declearn/fairness/fairgrad/_server.py b/declearn/fairness/fairgrad/_server.py
index 822b2bc9fb8e7b75f51bb73adfc4424ca08eee80..8364e28ba72e9812c723e868cca634cab9c2a351 100644
--- a/declearn/fairness/fairgrad/_server.py
+++ b/declearn/fairness/fairgrad/_server.py
@@ -152,7 +152,29 @@ class FairgradWeightsController:
 
 
 class FairgradControllerServer(FairnessControllerServer):
-    """Server-side controller to implement Fed-FairGrad."""
+    """Server-side controller to implement Fed-FairGrad.
+
+    FairGrad [1] is an algorithm to learn a model under group-fairness
+    constraints, which relies on reweighting its training loss based on
+    the current group-wise fairness levels of the model.
+
+    This controller, together with its client-side counterpart, implements
+    a straightforward adaptation of FairGrad to the federated learning
+    setting, where the fairness level of the model is computed in a
+    federated fashion at the start of each training round, and the
+    resulting loss weights are kept constant for all local training
+    steps within that round.
+
+    This algorithm may be applied with any group-fairness definition,
+    and with any number of sensitive attributes (hence sensitive groups)
+    compatible with the chosen definition.
+
+    References
+    ----------
+    - [1]
+        Maheshwari & Perrot (2023).
+        FairGrad: Fairness Aware Gradient Descent.
+        https://openreview.net/forum?id=0f8tU3QwWD
+    """
 
     algorithm = "fedfairgrad"
 
diff --git a/declearn/fairness/monitor/_server.py b/declearn/fairness/monitor/_server.py
index 12f2c2068004c96f29cd760ad70ef4869c8a965a..dbe6c6c56d2e0e61139751e319886f98cf182bd2 100644
--- a/declearn/fairness/monitor/_server.py
+++ b/declearn/fairness/monitor/_server.py
@@ -35,7 +35,17 @@ __all__ = [
 
 
 class FairnessMonitorServer(FairnessControllerServer):
-    """Server-side controller to monitor fairness without altering training."""
+    """Server-side controller to monitor fairness without altering training.
+
+    This controller, together with its client-side counterpart,
+    does not alter the training procedure of the model, but adds
+    computation and communication steps to measure its fairness
+    level at the start of each and every training round.
+
+    It is compatible with any group-fairness definition implemented
+    in DecLearn, and any number of sensitive groups compatible with
+    the chosen definition.
+    """
 
     algorithm = "monitor"