diff --git a/pyoptsparse/pySLSQP/pySLSQP.py b/pyoptsparse/pySLSQP/pySLSQP.py
index bb2431b4..2207239b 100644
--- a/pyoptsparse/pySLSQP/pySLSQP.py
+++ b/pyoptsparse/pySLSQP/pySLSQP.py
@@ -12,6 +12,7 @@
 import numpy as np
 
 # Local modules
+from ..pyOpt_error import pyOptSparseWarning
 from ..pyOpt_optimizer import Optimizer
 from ..pyOpt_utils import try_import_compiled_module_from_path
 
@@ -166,7 +167,9 @@ def __call__(
             # SLSQP - Objective/Constraint Values Function
             # =================================================================
             def slfunc(m, me, la, n, f, g, x):
-                fobj, fcon, fail = self._masterFunc(x, ["fobj", "fcon"])
+                if (x < blx).any() or (x > bux).any():
+                    pyOptSparseWarning("Values in x were outside bounds during a minimize step, clipping to bounds")
+                fobj, fcon, fail = self._masterFunc(np.clip(x, blx, bux), ["fobj", "fcon"])
                 f = fobj
                 g[0:m] = -fcon
                 slsqp.pyflush(self.getOption("IOUT"))
@@ -176,7 +179,7 @@ def slfunc(m, me, la, n, f, g, x):
             # SLSQP - Objective/Constraint Gradients Function
             # =================================================================
             def slgrad(m, me, la, n, f, g, df, dg, x):
-                gobj, gcon, fail = self._masterFunc(x, ["gobj", "gcon"])
+                gobj, gcon, fail = self._masterFunc(np.clip(x, blx, bux), ["gobj", "gcon"])
                 df[0:n] = gobj.copy()
                 dg[0:m, 0:n] = -gcon.copy()
                 slsqp.pyflush(self.getOption("IOUT"))
@@ -220,6 +223,10 @@ def slgrad(m, me, la, n, f, g, df, dg, x):
         # fmt: on
         optTime = time.time() - t0
 
+        # Clip final result to user bounds (this occurs during the optimization as well
+        # so this just makes the output consistent with what the optimizer sees)
+        xs = np.clip(xs, blx, bux)
+
         # some entries of W include the lagrange multipliers
         # for each constraint, there are two entries (lower, upper).
         # if only one is active, look for the nonzero. If both are active, take the first one
diff --git a/pyoptsparse/pySLSQP/source/lsq.f b/pyoptsparse/pySLSQP/source/lsq.f
index 00a673d0..0ac509c2 100644
--- a/pyoptsparse/pySLSQP/source/lsq.f
+++ b/pyoptsparse/pySLSQP/source/lsq.f
@@ -177,8 +177,20 @@ SUBROUTINE LSQ(M,MEQ,N,NL,LA,L,G,A,B,XL,XU,X,Y,W,JW,MODE)
          CALL DCOPY (N3, W(IW+M+N), 1, Y(M+N3+1), 1)
       ENDIF
+      call bound(n, x, xl, xu)
 
 C   END OF SUBROUTINE LSQ
 
       END
 
-      
\ No newline at end of file
+
+      subroutine bound(n, x, xl, xu)
+      integer n, i
+      double precision x(n), xl(n), xu(n)
+      do i = 1, n
+         if(x(i) < xl(i))then
+            x(i) = xl(i)
+         else if(x(i) > xu(i))then
+            x(i) = xu(i)
+         end if
+      end do
+      end subroutine bound
\ No newline at end of file
diff --git a/tests/test_slsqp.py b/tests/test_slsqp.py
new file mode 100644
index 00000000..b7fa70e8
--- /dev/null
+++ b/tests/test_slsqp.py
@@ -0,0 +1,46 @@
+"""Test class for SLSQP specific tests"""
+
+# Standard Python modules
+import unittest
+
+# First party modules
+from pyoptsparse import OPT, Optimization
+
+
+class TestSLSQP(unittest.TestCase):
+    def test_slsqp_strong_bound_enforcement(self):
+        """
+        Test that SLSQP will never evaluate the function or gradient outside
+        the design variable bounds. Without strong bound enforcement, the
+        optimizer will step outside the bounds and a ValueError will be raised.
+        With strong bound enforcement, this code will run without raising any
+        errors.
+        """
+
+        def objfunc(xdict):
+            x = xdict["xvars"]
+            funcs = {}
+            if x[0] < 0:
+                raise ValueError("Function cannot be evaluated below 0.")
+            funcs["obj"] = (x[0] + 1.0) ** 2
+            fail = False
+            return funcs, fail
+
+        def sens(xdict, funcs):
+            x = xdict["xvars"]
+            if x[0] < 0:
+                raise ValueError("Function cannot be evaluated below 0.")
+            funcsSens = {
+                "obj": {"xvars": [2 * (x[0] + 1.0)]},
+            }
+            fail = False
+            return funcsSens, fail
+
+        optProb = Optimization("Problem with Error Region", objfunc)
+        optProb.addVarGroup("xvars", 1, lower=[0], value=[2])
+        optProb.addObj("obj")
+        opt = OPT("SLSQP")
+        sol = opt(optProb, sens=sens)
+        self.assertEqual(sol.optInform["value"], 0)
+        self.assertGreaterEqual(sol.xStar["xvars"][0], 0)
+        self.assertAlmostEqual(sol.xStar["xvars"][0], 0, places=9)
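For reference, below is a minimal standalone sketch of the clip-and-warn pattern that the wrapper changes above introduce. The helper name clipped_eval and the toy bounds are illustrative only, not part of pyoptsparse:

    import warnings

    import numpy as np

    def clipped_eval(evaluate, x, lower, upper):
        # Warn when the optimizer proposes a point outside the user bounds,
        # then clip elementwise so the user's function never sees it.
        if (x < lower).any() or (x > upper).any():
            warnings.warn("x was outside bounds, clipping to bounds")
        return evaluate(np.clip(x, lower, upper))

    # A function that, like the one in the test above, cannot be
    # evaluated below zero.
    def f(x):
        if x[0] < 0:
            raise ValueError("Function cannot be evaluated below 0.")
        return (x[0] + 1.0) ** 2

    # Calling f directly at x = [-0.5] would raise; the clipped call
    # evaluates f at [0.0] and prints 1.0.
    print(clipped_eval(f, np.array([-0.5]), np.array([0.0]), np.array([2.0])))

Assuming a pytest-compatible setup, the new test can be run on its own with: python -m pytest tests/test_slsqp.py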