IPOPT duplicated history handling #252

Merged 19 commits on May 18, 2021
2 changes: 2 additions & 0 deletions doc/api/history.rst
@@ -36,6 +36,7 @@ In this case, the history file would have the following layout::
│ ├── funcs
│ │ ├── obj
│ │ └── con
│ ├── iter
│ ├── fail
│ └── isMajor
├── 1
@@ -46,6 +47,7 @@ In this case, the history file would have the following layout::
│ │ │ └── xvars
│ │ └── con
│ │ └── xvars
│ ├── iter
│ ├── fail
│ └── isMajor
└── last
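For reference, here is a minimal sketch of inspecting the new per-call iter entry directly with sqlitedict, the backend pyOptSparse uses for history files. The file name "opt_hist.hst" is a placeholder, and the snippet only assumes the key layout shown above.

from sqlitedict import SqliteDict

# Open an existing pyOptSparse history file; "opt_hist.hst" is a placeholder path.
db = SqliteDict("opt_hist.hst")

# Each call counter ("0", "1", ...) maps to a dict that now also stores "iter",
# the optimizer iteration the call belongs to.
for key in ("0", "1", "2"):
    if key in db:
        entry = db[key]
        print(key, entry.get("iter"), entry.get("isMajor"), entry.get("fail"))

db.close()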
32 changes: 30 additions & 2 deletions pyoptsparse/postprocessing/OptView_baseclass.py
@@ -13,6 +13,9 @@
import numpy as np
from sqlitedict import SqliteDict

# pyOptSparse warning
from ..pyOpt_error import pyOptSparseWarning


class OVBaseClass(object):

@@ -111,6 +114,8 @@ def OptimizationHistory(self):
self.nkey = nkey

# Initialize a list detailing if the iterations are major or minor
# 1 = major, 2 = minor, 0 = sensitivity (or duplicated info by IPOPT)
# The entries whose iter_type = 0 will be ignored.
self.iter_type = np.zeros(nkey)

# Check to see if there is bounds information in the db file.
@@ -155,6 +160,13 @@ def OptimizationHistory(self):
else:
self.storedIters = False

# Raise warning for IPOPT's duplicated history
if db["metadata"]["optimizer"] == "IPOPT" and "iter" not in db["0"].keys():
pyOptSparseWarning(
"The optimization history file has duplicated entries at every iteration, and the OptView plot is not correct. "
+ "Re-run the optimization with a current version of pyOptSparse to generate a correct history file."
)

# Save information from the history file for the funcs.
self.DetermineMajorIterations(db, OpenMDAO=OpenMDAO)

@@ -180,7 +192,10 @@ def OptimizationHistory(self):
def DetermineMajorIterations(self, db, OpenMDAO):

if not OpenMDAO:
# Loop over each optimization iteration

previousIterCounter = -1

# Loop over each optimization call
for i, iter_type in enumerate(self.iter_type):

# If this is an OpenMDAO file, the keys are of the format
@@ -192,15 +207,28 @@ def DetermineMajorIterations(self, db, OpenMDAO):
# actual major iteration. In particular, one has funcs
# and the next has funcsSens, but they're both part of the
# same major iteration.
# For IPOPT, it saves info for four calls for every
# actual major iteration: objective, constraints,
# and sensitivities of each.

if "funcs" in db[key].keys():
# check if this entry is duplicated info. Only relevant for IPOPT.
# Note: old hist files don't have "iter"
if "iter" in db[key].keys() and db[key]["iter"] == previousIterCounter:
# duplicated info
self.iter_type[i] = 0

# if we did not store major iteration info, everything's major
if not self.storedIters:
elif not self.storedIters:
self.iter_type[i] = 1
# this is a major iteration
elif self.storedIters and db[key]["isMajor"]:
self.iter_type[i] = 1
else:
self.iter_type[i] = 2

if "iter" in db[key].keys():
previousIterCounter = db[key]["iter"]
else:
self.iter_type[i] = 0 # this is not a real iteration,
# just the sensitivity evaluation
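To make the classification above concrete, here is a toy sketch (not OptView itself) of how consecutive function-call entries that share an iter value are flagged as duplicates; the entry dicts are invented for illustration and the storedIters handling is omitted.

import numpy as np

# Hypothetical entries as IPOPT would record them: the objective call and the
# constraint call of one iteration share the same "iter" value.
entries = [
    {"funcs": {}, "iter": 0, "isMajor": True},      # objective call, iteration 0
    {"funcs": {}, "iter": 0, "isMajor": True},      # constraint call, duplicated info
    {"funcsSens": {}, "iter": 0, "isMajor": False}, # sensitivity call
    {"funcs": {}, "iter": 1, "isMajor": True},      # objective call, iteration 1
    {"funcs": {}, "iter": 1, "isMajor": True},      # constraint call, duplicated info
]

# 1 = major, 2 = minor, 0 = sensitivity or duplicated info (ignored when plotting)
iter_type = np.zeros(len(entries))
previousIterCounter = -1

for i, entry in enumerate(entries):
    if "funcs" in entry:
        if entry["iter"] == previousIterCounter:
            iter_type[i] = 0  # duplicate of the previous function call
        elif entry["isMajor"]:
            iter_type[i] = 1  # major iteration
        else:
            iter_type[i] = 2  # minor iteration
        previousIterCounter = entry["iter"]
    else:
        iter_type[i] = 0  # sensitivity evaluation only

print(iter_type)  # [1. 0. 0. 1. 0.]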
120 changes: 90 additions & 30 deletions pyoptsparse/pyOpt_history.py
@@ -630,37 +630,25 @@ def getValues(self, names=None, callCounters=None, major=True, scale=False, stac
callCounters.append(self.read("last"))
callCounters.remove("last")

self._previousIterCounter = -1
# loop over call counters, check if each counter is valid, and parse
for i in callCounters:
if self.pointExists(i):
val = self.read(i)
if "funcs" in val.keys() or allowSens: # we have function evaluation
if ((major and val["isMajor"]) or not major) and not val["fail"]:
conDict, objDict, DVDict = self._processIterDict(val, scale=scale)
for name in names:
if name == "xuser":
data[name].append(self.optProb.processXtoVec(DVDict))
elif name in self.DVNames:
data[name].append(DVDict[name])
elif name in self.conNames:
data[name].append(conDict[name])
elif name in self.objNames:
data[name].append(objDict[name])
elif name in self.extraFuncsNames:
data[name].append(val["funcs"][name])
else: # must be opt
data[name].append(val[name])
elif val["fail"] and user_specified_callCounter:
pyOptSparseWarning(
("callCounter {} contained a failed function evaluation and is skipped!").format(i)
)
elif user_specified_callCounter:
pyOptSparseWarning(
(
"callCounter {} did not contain a function evaluation and is skipped! Was it a gradient evaluation step?"
).format(i)
)
elif user_specified_callCounter:
pyOptSparseWarning(("callCounter {} was not found and is skipped!").format(i))
val = self._readValidCallCounter(i, user_specified_callCounter, allowSens, major)
if val is not None: # if i is valid
conDict, objDict, DVDict = self._processIterDict(val, scale=scale)
for name in names:
if name == "xuser":
data[name].append(self.optProb.processXtoVec(DVDict))
elif name in self.DVNames:
data[name].append(DVDict[name])
elif name in self.conNames:
data[name].append(conDict[name])
elif name in self.objNames:
data[name].append(objDict[name])
elif name in self.extraFuncsNames:
data[name].append(val["funcs"][name])
else: # must be opt
data[name].append(val[name])

# reshape lists into numpy arrays
for name in names:
@@ -670,8 +658,80 @@ def getValues(self, names=None, callCounters=None, major=True, scale=False, stac
if data[name].ndim == 1:
data[name] = np.expand_dims(data[name], 1)

# Raise warning for IPOPT's duplicated history
if self.db["metadata"]["optimizer"] == "IPOPT" and "iter" not in self.db["0"].keys():
pyOptSparseWarning(
"The optimization history of IPOPT has duplicated entries at every iteration. "
+ "Fix the history manually, or re-run the optimization with a current version of pyOptSparse to generate a correct history file. "
)
return data

def _readValidCallCounter(self, i, user_specified_callCounter, allowSens, major):
"""
Checks whether a call counter is valid and reads the data. The call counter is valid when it is
1) inside the range of the history data,
2) a function evaluation (i.e. not a sensitivity evaluation, except when `allowSens = True`),
3) not a duplicated entry,
4) not a failed function evaluation,
5) a major iteration (only when `major = True`).

Parameters
----------
i : int
call counter.

user_specified_callCounter : bool
flag to specify whether the call counter `i` is requested by a user or not.

allowSens: bool
flag to specify whether gradient evaluation iterations are allowed.

major : bool
flag to specify whether to include only major iterations.

Returns
-------
val : dict or None
information corresponding to the call counter `i`.
If the call counter is not valid, `None` is returned instead.
"""

if not self.pointExists(i):
if user_specified_callCounter:
# user specified a non-existent call counter
pyOptSparseWarning(("callCounter {} was not found and is skipped!").format(i))
return None
else:
val = self.read(i)

# check if the callCounter corresponds to a function call
if not ("funcs" in val.keys() or allowSens):
if user_specified_callCounter:
# user unintentionally specified a call counter for sensitivity
pyOptSparseWarning(
(
"callCounter {} did not contain a function evaluation and is skipped! Was it a gradient evaluation step?"
).format(i)
)
return None
else:
# exclude the duplicated history (only when we have "iter" recorded)
if "iter" in val.keys():
duplicate_flag = val["iter"] == self._previousIterCounter
self._previousIterCounter = val["iter"] # update iterCounter for next i
if duplicate_flag and not user_specified_callCounter:
# this is a duplicate
return None
# end if "iter" in val.keys()

# check major/minor iteration, and if the call failed
if ((major and val["isMajor"]) or not major) and not val["fail"]:
return val
else:
return None
# end if - ("funcs" in val.keys()
# end if - pointExists

def __del__(self):
try:
self.db.close()
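For context, a hedged usage sketch of getValues after this refactor. The file name is a placeholder and the key names ("obj", "xvars") follow the documentation example above; with a history written by the patched optimizer, entries whose iter repeats the previous function call are skipped unless their call counters are requested explicitly.

from pyoptsparse import History

# "opt_ipopt.hst" is a placeholder path for an IPOPT history file.
hist = History("opt_ipopt.hst")

# Default extraction: duplicated IPOPT entries are filtered out, so each row
# corresponds to one optimizer iteration.
data = hist.getValues(names=["obj", "xvars"], major=True, scale=False)
print(data["obj"].shape)

# Explicitly requested call counters are not dropped as duplicates; missing
# counters, failed evaluations, and sensitivity-only calls are still skipped
# with a warning.
data_raw = hist.getValues(names=["obj"], callCounters=[0, 1, 2])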
14 changes: 13 additions & 1 deletion pyoptsparse/pyOpt_optimizer.py
@@ -61,7 +61,9 @@ def __init__(
checkDefaultOptions=checkDefaultOptions,
caseSensitiveOptions=caseSensitiveOptions,
)
self.callCounter = 0
# callCounter is incremented after each function call; iterCounter is incremented before the call.
self.callCounter = 0 # counts all function calls (fobj, fcon, gobj, gcon)
self.iterCounter = -1 # counts iterations (new x points)
self.sens: Union[None, Callable, Gradient] = None
self.optProb: Optimization
self.version: Optional[str] = version
Expand Down Expand Up @@ -217,6 +219,10 @@ def _masterFunc(self, x: ndarray, evaluate: List[str]):
values is required on return
"""

# Increment iteration counter if x is a new point
if not np.isclose(x, self.cache["x"], atol=EPS, rtol=EPS).all():
self.iterCounter += 1

# We are hot starting, we should be able to read the required
# information out of the hot start file, process it and then
# fire it back to the specific optimizer
@@ -280,6 +286,9 @@ def _masterFunc(self, x: ndarray, evaluate: List[str]):
if "gcon" in evaluate:
returns.append(gcon)

# Cache x because the iteration counter needs it
self.cache["x"] = x.copy()

# We can now safely increment the call counter
self.callCounter += 1
returns.append(fail)
@@ -544,6 +553,9 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
# Put the fail flag in the history:
hist["fail"] = masterFail

# Put the iteration counter in the history
hist["iter"] = self.iterCounter

# Save information about major iteration counting (only matters for SNOPT).
if self.name == "SNOPT":
hist["isMajor"] = False # this will be updated in _snstop if it is major
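As a standalone illustration of the new-point test that advances iterCounter (not the optimizer class itself), assuming EPS is a small tolerance comparable to machine epsilon, as in pyOptSparse:

import numpy as np

EPS = np.finfo(np.float64).eps  # assumed tolerance; pyOptSparse defines its own EPS

iterCounter = -1
cached_x = None

def master_func_sketch(x):
    """Toy stand-in for _masterFunc: bump iterCounter only when x is a new point."""
    global iterCounter, cached_x
    if cached_x is None or not np.isclose(x, cached_x, atol=EPS, rtol=EPS).all():
        iterCounter += 1
    # ... objective/constraint (or gradient) evaluation would happen here ...
    cached_x = x.copy()  # cache x so the next call can detect a repeated point
    return iterCounter

x0 = np.array([1.0, 2.0])
print(master_func_sketch(x0))        # 0: first design point
print(master_func_sketch(x0))        # 0: IPOPT asks for constraints at the same x
print(master_func_sketch(x0 + 0.1))  # 1: a genuinely new design point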