From 16c17a38906bf700cac7bf0dc4115ec065bf844f Mon Sep 17 00:00:00 2001
From: lijialin03
Date: Tue, 17 Dec 2024 04:14:04 +0000
Subject: [PATCH] update code 2

---
 deepxde/optimizers/paddle/optimizers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/deepxde/optimizers/paddle/optimizers.py b/deepxde/optimizers/paddle/optimizers.py
index c585f2855..73c9df6e8 100644
--- a/deepxde/optimizers/paddle/optimizers.py
+++ b/deepxde/optimizers/paddle/optimizers.py
@@ -25,10 +25,10 @@ def get(params, optimizer, learning_rate=None, decay=None, weight_decay=None):
         return optimizer
 
     if optimizer in ["L-BFGS", "L-BFGS-B"]:
-        if weight_decay is not None:
-            raise ValueError("L-BFGS optimizer doesn't support weight_decay")
         if learning_rate is not None or decay is not None:
             print("Warning: learning rate is ignored for {}".format(optimizer))
+        if weight_decay is not None:
+            raise ValueError("L-BFGS optimizer doesn't support weight_decay")
         optim = paddle.optimizer.LBFGS(
             learning_rate=1,
             max_iter=LBFGS_options["iter_per_step"],
@@ -67,7 +67,7 @@ def get(params, optimizer, learning_rate=None, decay=None, weight_decay=None):
                 or weight_decay._coeff == 0
             ):
                 raise ValueError(
-                    "AdamW optimizer requires L2 regularizer and non-zero weight decay"
+                    "AdamW optimizer requires non-zero L2 regularizer"
                 )
             return paddle.optimizer.AdamW(
                 learning_rate=learning_rate,
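
Note (not part of the patch): a minimal sketch of how the patched get() helper is expected to behave, assuming DeepXDE is installed with the Paddle backend selected (e.g. DDE_BACKEND=paddle). The "adamw" option string, the Linear layer used only to obtain parameters, and the use of paddle.regularizer.L2Decay as the weight_decay object are illustrative assumptions inferred from the diff context, not confirmed by the patch itself.

    # Sketch of the validation behaviour this change enforces (assumptions noted above).
    import paddle
    from deepxde.optimizers.paddle.optimizers import get

    params = paddle.nn.Linear(2, 1).parameters()  # any parameter list works for illustration

    # L-BFGS: a learning rate only triggers the warning and is then ignored ...
    opt = get(params, "L-BFGS", learning_rate=1e-3)

    # ... but weight_decay is rejected with a ValueError.
    try:
        get(params, "L-BFGS", weight_decay=paddle.regularizer.L2Decay(1e-4))
    except ValueError as err:
        print(err)  # "L-BFGS optimizer doesn't support weight_decay"

    # AdamW: weight_decay must be a non-zero L2 regularizer (per the updated message).
    opt = get(
        params,
        "adamw",  # assumed option string for AdamW
        learning_rate=1e-3,
        weight_decay=paddle.regularizer.L2Decay(1e-4),
    )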