diff --git a/test/fairness/algorithms/test_fairgrad_weights_controller.py b/test/fairness/algorithms/test_fairgrad_weights_controller.py
index b3d8637fa61910eb68fcd9e31b51ea7812cc67ab..4259ee4e7de455d29f170050aee2ec7265a6b008 100644
--- a/test/fairness/algorithms/test_fairgrad_weights_controller.py
+++ b/test/fairness/algorithms/test_fairgrad_weights_controller.py
@@ -26,6 +26,7 @@ from declearn.fairness.api import FairnessFunction
 from declearn.fairness.fairgrad import FairgradWeightsController
 
 
+# pylint: disable=duplicate-code
 COUNTS = {(0, 0): 30, (0, 1): 15, (1, 0): 35, (1, 1): 20}
 F_TYPES = [
     "accuracy_parity",
@@ -33,6 +34,7 @@ F_TYPES = [
     "equality_of_opportunity",
     "equalized_odds",
 ]
+# pylint: enable=duplicate-code
 
 
 class TestFairgradWeightsController:
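
Note on the hunk above: the disable/enable pair scopes the pylint suppression to
the constants that are repeated verbatim across sibling fairness test modules;
without the enable line, duplicate-code checking would stay off for the rest of
the file. A minimal standalone sketch of the pattern (constant and values are
illustrative, not taken from the repo):

    # pylint: disable=duplicate-code
    COUNTS = {(0, 0): 30, (0, 1): 15}  # repeated verbatim in sibling modules
    # pylint: enable=duplicate-code

    OTHER_CONSTANT = 1  # checked again from this point on
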
diff --git a/test/fairness/controllers/test_fairbatch_controllers.py b/test/fairness/controllers/test_fairbatch_controllers.py
index f6e5a67e3983f81df185f8c2de67aac2377b51fd..e815623598d6e37aa748505e7551dbd2042de9b0 100644
--- a/test/fairness/controllers/test_fairbatch_controllers.py
+++ b/test/fairness/controllers/test_fairbatch_controllers.py
@@ -49,6 +49,8 @@ with make_importable(os.path.dirname(os.path.abspath(__file__))):
 class TestFairbatchControllers(FairnessControllerTestSuite):
     """Unit tests for Fed-FairBatch / FedFB controllers."""
 
+    # similar to FairGrad and parent test code; pylint: disable=duplicate-code
+
     server_cls = FairbatchControllerServer
     client_cls = FairbatchControllerClient
 
@@ -169,13 +171,12 @@ class TestFairbatchControllers(FairnessControllerTestSuite):
             client.groups = server.groups.copy()
         counts = [TOTAL_COUNTS[group] for group in server.groups]
         # Run setup coroutines, using mock network endpoints.
-        aggregator = mock.create_autospec(SumAggregator, instance=True)
         async with setup_mock_network_endpoints(n_peers) as network:
             coro_server = server.finalize_fairness_setup(
                 netwk=network[0],
                 secagg=None,
                 counts=counts,
-                aggregator=aggregator,
+                aggregator=mock.create_autospec(SumAggregator, instance=True),
             )
             coro_clients = [
                 client.finalize_fairness_setup(
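
Both controller-test hunks (this one and the FairGrad one below) fold the
one-shot aggregator variable into the call site. mock.create_autospec with
instance=True returns a mock constrained to the real SumAggregator API. A
minimal sketch of what autospec'ing buys, with a hypothetical Counter class
standing in for SumAggregator:

    from unittest import mock

    class Counter:
        def add(self, value: int) -> None:
            """Record a value."""

    counter = mock.create_autospec(Counter, instance=True)
    counter.add(1)  # signature-checked against Counter.add
    counter.add.assert_called_once_with(1)
    # counter.add(1, 2) raises TypeError; counter.missing raises
    # AttributeError, unlike a plain MagicMock which accepts both.
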
diff --git a/test/fairness/controllers/test_fairgrad_controllers.py b/test/fairness/controllers/test_fairgrad_controllers.py
index bd4af8c0f2ce0e11a8f53e4de676993e0ecd65d0..cf6612f973c1ff88010faae5d1d534124292f8e0 100644
--- a/test/fairness/controllers/test_fairgrad_controllers.py
+++ b/test/fairness/controllers/test_fairgrad_controllers.py
@@ -132,13 +132,12 @@ class TestFairgradControllers(FairnessControllerTestSuite):
             mock_dst.set_sensitive_group_weights.side_effect = Exception
         counts = [TOTAL_COUNTS[group] for group in server.groups]
         # Run setup coroutines, using mock network endpoints.
-        aggregator = mock.create_autospec(SumAggregator, instance=True)
         async with setup_mock_network_endpoints(n_peers) as network:
             coro_server = server.finalize_fairness_setup(
                 netwk=network[0],
                 secagg=None,
                 counts=counts,
-                aggregator=aggregator,
+                aggregator=mock.create_autospec(SumAggregator, instance=True),
             )
             coro_clients = [
                 client.finalize_fairness_setup(
diff --git a/test/fairness/test_fairness_inmemory_dataset.py b/test/fairness/test_fairness_inmemory_dataset.py
index 801c744da972bb57d1a4b59f267def7974c65db2..3551e34d677c4c857575a3ac6c88b335a864509d 100644
--- a/test/fairness/test_fairness_inmemory_dataset.py
+++ b/test/fairness/test_fairness_inmemory_dataset.py
@@ -72,9 +72,8 @@ class TestFairnessInMemoryDatasetInit:
         dst = FairnessInMemoryDataset(
             dataset, s_attr=s_attr, target="col_y", sensitive_target=True
         )
-        expected = pd.concat(
-            [pd.DataFrame(dataset["col_y"].rename("target")), s_attr],
-            axis=1,
+        expected = pd.DataFrame(
+            {"target": dataset["col_y"], "col_s": s_attr["col_s"]}
         ).apply(tuple, axis=1)
         assert isinstance(dst.sensitive, pd.Series)
         assert (dst.sensitive == expected).all()
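
The rewritten expectation builds the Series of (target, sensitive) tuples with
a single DataFrame constructor instead of concatenating a renamed frame with
the attribute frame. A standalone sketch with toy data (column names mirror
the test, values are illustrative):

    import pandas as pd

    dataset = pd.DataFrame({"col_y": [0, 1], "col_s": ["a", "b"]})
    s_attr = dataset[["col_s"]]
    expected = pd.DataFrame(
        {"target": dataset["col_y"], "col_s": s_attr["col_s"]}
    ).apply(tuple, axis=1)
    print(expected.tolist())  # [(0, 'a'), (1, 'b')]
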
diff --git a/test/functional/test_toy_clf_fairness.py b/test/functional/test_toy_clf_fairness.py
index 16c21650db5bbcfa0af18d37b177194a27ae6637..152922955e3d75818a8d0dd2f4482ecb44a11d97 100644
--- a/test/functional/test_toy_clf_fairness.py
+++ b/test/functional/test_toy_clf_fairness.py
@@ -117,6 +117,7 @@ async def server_routine(
     n_clients: int = 3,
 ) -> None:
     """Run the FL routine of the server."""
+    # similar to the SecAgg functional test; pylint: disable=duplicate-code
     model = SklearnSGDModel.from_parameters(
         kind="classifier",
         loss="log_loss",
@@ -221,9 +222,7 @@ async def test_toy_classif_fairness(
         coro_server, *coro_clients, return_exceptions=True
     )
     # Assert that no exceptions occurred during the process.
-    errors = "\n".join(
-        repr(exc) for exc in outputs if isinstance(exc, Exception)
-    )
+    errors = "\n".join(repr(e) for e in outputs if isinstance(e, Exception))
     assert not errors, f"The FL process failed:\n{errors}"
     # Load and parse utility and fairness metrics at the final round.
     u_metrics = pd.read_csv(os.path.join(tmp_path, "metrics.csv"))
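
Both functional tests rely on the same gather-then-assert pattern:
return_exceptions=True keeps one failing coroutine from cancelling its peers,
and the joined reprs surface every failure in a single assertion message. A
standalone sketch (coroutine names are illustrative):

    import asyncio

    async def boom(name: str) -> None:
        raise RuntimeError(name)

    async def main() -> None:
        outputs = await asyncio.gather(
            boom("server"), boom("client"), return_exceptions=True
        )
        errors = "\n".join(repr(e) for e in outputs if isinstance(e, Exception))
        assert not errors, f"The FL process failed:\n{errors}"

    asyncio.run(main())  # raises AssertionError listing both RuntimeErrors
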
diff --git a/test/functional/test_toy_clf_secagg.py b/test/functional/test_toy_clf_secagg.py
index c55d3d3b9ad2deeaafca7a465b4af72aa78e79c9..a96d5e91737233aff9184dd33d850b50e6061946 100644
--- a/test/functional/test_toy_clf_secagg.py
+++ b/test/functional/test_toy_clf_secagg.py
@@ -221,13 +221,11 @@ async def run_declearn_experiment(
             for i, (train, valid) in enumerate(datasets)
         ]
         # Run the coroutines concurrently using asyncio.
-        outputs = await asyncio.gather(
+        output = await asyncio.gather(
             coro_server, *coro_clients, return_exceptions=True
         )
         # Assert that no exceptions occurred during the process.
-        errors = "\n".join(
-            repr(exc) for exc in outputs if isinstance(exc, Exception)
-        )
+        errors = "\n".join(repr(e) for e in output if isinstance(e, Exception))
         assert not errors, f"The FL process failed:\n{errors}"
         # Assert that the experiment ran properly.
         with open(
diff --git a/test/optimizer/test_modules.py b/test/optimizer/test_modules.py
index 123edf20db0101dfbac27aed62379a560cc7b03a..d3a91a808cf4f24e0f0c0109d3c269421413c50a 100644
--- a/test/optimizer/test_modules.py
+++ b/test/optimizer/test_modules.py
@@ -149,7 +149,7 @@ class OptiModuleTestSuite(PluginTestBase):
     ) -> None:
         # For Noise-addition mechanisms, seed the (unsafe) RNG.
         if issubclass(cls, NoiseModule):
-            cls = functools.partial(
+            cls = functools.partial(  # type: ignore[misc]
                 cls, safe_mode=False, seed=0
-            )  # type: ignore  # partial wraps the __init__ method
+            )  # partial wraps the __init__ method
         # Run the unit test.
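
The last hunk moves the targeted ignore onto the opening line of the call:
mypy generally reports errors for a multi-line statement on its first line,
so a narrow type: ignore[misc] must sit there for the reassignment of cls to
a functools.partial, and the blanket ignore on the closing parenthesis can be
dropped. The runtime pattern itself, sketched with a hypothetical Noise class
standing in for declearn's noise modules:

    import functools

    class Noise:
        """Toy stand-in for a NoiseModule subclass."""

        def __init__(self, safe_mode: bool = True, seed: int = 0) -> None:
            self.safe_mode = safe_mode
            self.seed = seed

    seeded = functools.partial(Noise, safe_mode=False, seed=0)
    module = seeded()  # same as Noise(safe_mode=False, seed=0)
    assert (module.safe_mode, module.seed) == (False, 0)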