diff --git a/declearn/metrics/_classif.py b/declearn/metrics/_classif.py
index 5486d7745c6fa659e3b067bf0c22a864d4a7035b..c50e31f941dfa28b7fa2c767a6c161cd54b58fdb 100644
--- a/declearn/metrics/_classif.py
+++ b/declearn/metrics/_classif.py
@@ -219,7 +219,7 @@ class MulticlassAccuracyPrecisionRecall(Metric[ClassifConfmat]):
                 "f-score": 2 * diag / (pred + true),
             }
         # Convert NaNs resulting from zero-division to zero.
-        scores = {k: np.nan_to_num(v, copy=False) for k, v in scores.items()}
+        scores = {k: np.nan_to_num(v) for k, v in scores.items()}
         # Add a copy of the confusion matrix and return.
         scores["confusion"] = confmat.copy()
         return scores
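
Note on the hunk above: np.nan_to_num(v, copy=False) sanitizes NaNs in place, mutating its input and raising on read-only arrays, while the default (copy=True) returns a sanitized copy and leaves the input untouched. A minimal sketch of the precision case, with hypothetical per-class counts:

    import numpy as np

    diag = np.array([3.0, 0.0])  # correct predictions per class
    pred = np.array([4.0, 0.0])  # predicted counts per class

    with np.errstate(divide="ignore", invalid="ignore"):
        precision = diag / pred  # 0/0 for the unseen class -> NaN
    # Default copy semantics: a sanitized copy is returned, leaving
    # `precision` (and any view sharing its buffer) untouched.
    precision = np.nan_to_num(precision)  # array([0.75, 0.])
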
diff --git a/declearn/metrics/_roc_auc.py b/declearn/metrics/_roc_auc.py
index 65cb8cf39aaf455f0038c859922be32298d0a032..f5bfb986fab2a5ea823d0d9a7239d7a7b3e0d0a2 100644
--- a/declearn/metrics/_roc_auc.py
+++ b/declearn/metrics/_roc_auc.py
@@ -291,8 +291,8 @@ class BinaryRocAUC(Metric[AurocState]):
         fneg = self._states.fneg[::-1]
         # Compute true- and false-positive rates and derive AUC.
         with np.errstate(invalid="ignore"):
-            tpr = np.nan_to_num(tpos / (tpos + fneg), copy=False)
-            fpr = np.nan_to_num(fpos / (fpos + tneg), copy=False)
+            tpr = np.nan_to_num(tpos / (tpos + fneg))
+            fpr = np.nan_to_num(fpos / (fpos + tneg))
         auc = sklearn.metrics.auc(fpr, tpr)
         return {
             "tpr": tpr,
diff --git a/declearn/model/sklearn/_sgd.py b/declearn/model/sklearn/_sgd.py
index c0c8aba02b159c9f45365e024696c815e998f67a..c1e0c49aeb2f6df9769a8fa1c0b25a20fa1ec0b8 100644
--- a/declearn/model/sklearn/_sgd.py
+++ b/declearn/model/sklearn/_sgd.py
@@ -445,7 +445,7 @@ class SklearnSGDModel(Model):
             raise TypeError(
                 f"Invalid data type for 'SklearnSGDModel': '{type(array)}'."
             )
-        return array.astype(self._dtype, copy=False)  # type: ignore
+        return array.astype(self._dtype)  # type: ignore
 
     def _compute_sample_gradient(
         self,
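
As with the nan_to_num changes, this hunk drops copy=False in favour of the copying default: ndarray.astype(dtype, copy=False) returns the input array itself whenever the dtype already matches, so the model could end up sharing (and later mutating) a caller-owned buffer; presumably the unconditional copy is the safer behaviour here. A small illustration:

    import numpy as np

    data = np.array([1.0, 2.0], dtype="float32")
    view = data.astype("float32", copy=False)
    assert view is data        # no copy: aliases the caller's buffer
    owned = data.astype("float32")
    assert owned is not data   # default copy=True: fresh allocation
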
diff --git a/declearn/model/torch/_model.py b/declearn/model/torch/_model.py
index 1ad815967221aacfddde1b15c7b79a2cf526565a..0fefdcba73910d1f886ebd07d993836c9ad7ab9b 100644
--- a/declearn/model/torch/_model.py
+++ b/declearn/model/torch/_model.py
@@ -167,9 +167,9 @@ class TorchModel(Model):
     ) -> Self:
         """Instantiate a TorchModel from a configuration dict."""
         with io.BytesIO(bytes.fromhex(config["model"])) as buffer:
-            model = torch.load(buffer)
+            model = torch.load(buffer, weights_only=False)
         with io.BytesIO(bytes.fromhex(config["loss"])) as buffer:
-            loss = torch.load(buffer)
+            loss = torch.load(buffer, weights_only=False)
         if config.get("compile", False) and hasattr(torch, "compile"):
             model = torch.compile(model)
         return cls(model=model, loss=loss)
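
The torch.load change tracks PyTorch 2.6, which flipped the default of weights_only to True; the restricted unpickler then rejects full nn.Module and loss objects such as those stored pickled and hex-encoded in the config. Passing weights_only=False restores the previous behaviour, which is only appropriate for trusted configuration sources. A round-trip sketch, assuming the config stores the module the way from_config above decodes it:

    import io

    import torch

    model = torch.nn.Linear(4, 2)
    # Serialize the full module (not just its state dict) to hex.
    with io.BytesIO() as buffer:
        torch.save(model, buffer)
        dump = buffer.getvalue().hex()
    # Under torch >= 2.6 the weights_only=True default would reject
    # this pickled nn.Module; opt out explicitly for trusted input.
    with io.BytesIO(bytes.fromhex(dump)) as buffer:
        restored = torch.load(buffer, weights_only=False)
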