Improve log messages, warnings and errors
Improve the log messages, warnings and errors by:
- Making them more explicit
- Including the actual (unexpected) value or type of the variable
- Including the expected value or type
- Stating the explicit type of warning or error (DeprecationWarning, ValueError, etc.)
- Converting them to f-strings (for better in-code readability)
- Starting warnings and errors with a capital letter, while keeping log statements lowercase (see the sketch below this message)

This will make debugging easier, allowing more time for model development and experimentation.

There are two places where it's mentioned that a function is deprecated, but not when it will be removed. I added TODOs there; I think we should decide on a version in which we want to remove them.
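
A minimal sketch of the message conventions this commit aims for; the helper function, its parameters, and the `stacklevel` argument are illustrative assumptions, not code taken from the diff:

```python
import logging
import warnings

_logger = logging.getLogger(__name__)


def resolve_n_processes(n_processes, max_processes=8):
    """Hypothetical helper showing the error, warning and logging conventions."""
    if n_processes is None:
        n_processes = max_processes
    elif not isinstance(n_processes, int):
        # Error: capitalized, explicit exception type, states the expected
        # and the actual type.
        raise ValueError(
            f"n_processes must be an integer or None, not {type(n_processes)}"
        )
    elif n_processes > max_processes:
        # Warning: capitalized, explicit warning category.
        warnings.warn(
            f"The number of processes cannot be more than {max_processes}",
            UserWarning,
            stacklevel=2,
        )
        n_processes = max_processes
    # Log statement: lowercase f-string that includes the relevant value.
    _logger.debug(f"using {n_processes} processes")
    return n_processes
```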
EwoutH committed Oct 29, 2023
1 parent b76b487 commit 42477c9
Showing 12 changed files with 85 additions and 74 deletions.
2 changes: 1 addition & 1 deletion ema_workbench/em_framework/callbacks.py
@@ -246,7 +246,7 @@ def _store_outcomes(self, case_id, outcomes):
try:
outcome_res = outcomes[outcome_name]
except KeyError:
message = f"{outcome_name} not specified as outcome in model(s)"
message = f"outcome {outcome_name} not specified as outcome in model(s)"
_logger.debug(message)
else:
try:
4 changes: 2 additions & 2 deletions ema_workbench/em_framework/ema_multiprocessing.py
@@ -204,7 +204,7 @@ def run(self):
record = self.queue.get()
# get the logger for this record
if record is None:
_logger.debug("none received")
_logger.debug("no record received from queue")
break

logger = logging.getLogger(record.name)
@@ -247,7 +247,7 @@ def run(self):
result = self.queue.get()
# get the logger for this record
if result is None:
_logger.debug("none received")
_logger.debug("no record received from queue")
break

self.callback(*result.get())
21 changes: 8 additions & 13 deletions ema_workbench/em_framework/evaluators.py
@@ -116,8 +116,7 @@ def __init__(self, msis):
for entry in msis:
if not isinstance(entry, AbstractModel):
raise TypeError(
f"{entry} should be an AbstractModel "
f"instance but is a {entry.__class__} instance"
f"{entry} should be an AbstractModel instance, but is a {entry.__class__} instance"
)

self._msis = msis
@@ -346,14 +345,16 @@ def __init__(self, msis, n_processes=None, maxtasksperchild=None, **kwargs):
if isinstance(n_processes, int):
if n_processes > 0:
if max_processes < n_processes:
warnings.warn(f"the number of processes cannot be more then {max_processes}")
warnings.warn(
f"The number of processes cannot be more then {max_processes}", UserWarning
)
self.n_processes = min(n_processes, max_processes)
else:
self.n_processes = max(max_processes + n_processes, 1)
elif n_processes is None:
self.n_processes = max_processes
else:
raise ValueError("max_processes must be an integer or None")
raise ValueError(f"max_processes must be an integer or None, not {type(n_processes)}")

self.maxtasksperchild = maxtasksperchild

@@ -569,11 +570,7 @@ def perform_experiments(

if callback.i != nr_of_exp:
raise EMAError(
(
"some fatal error has occurred while "
"running the experiments, not all runs have "
"completed. expected {}, got {}"
).format(nr_of_exp, callback.i)
f"Some fatal error has occurred while running the experiments, not all runs have completed. Expected {nr_of_exp}, got {callback.i}"
)

_logger.info("experiments finished")
@@ -723,15 +720,13 @@ def optimize(
"""
if searchover not in ("levers", "uncertainties"):
raise EMAError(
"searchover should be one of 'levers' or" "'uncertainties' not {}".format(searchover)
)
raise EMAError(f"Searchover should be one of 'levers' or 'uncertainties' not {searchover}")

try:
if len(models) == 1:
models = models[0]
else:
raise NotImplementedError("optimization over multiple" "models yet supported")
raise NotImplementedError("Optimization over multiple models yet supported")
except TypeError:
pass

2 changes: 1 addition & 1 deletion ema_workbench/em_framework/experiment_runner.py
@@ -104,7 +104,7 @@ def run_experiment(self, experiment):
# sys.stderr.write("\n")

errortype = type(e).__name__
raise EMAError(f"exception in run_model\nCaused by: {errortype}: {str(e)}")
raise EMAError(f"Exception in run_model\nCaused by: {errortype}: {str(e)}")

outcomes = model.outcomes_output
model.reset_model()
31 changes: 18 additions & 13 deletions ema_workbench/em_framework/model.py
@@ -96,7 +96,9 @@ def __init__(self, name):
super().__init__(name)

if not self.name.isalnum():
raise EMAError("name of model should only contain " "alpha numerical characters")
raise EMAError(
f"Name of model ({self.name}) should only contain alpha numerical characters"
)

self._output_variables = None
self._outcomes_output = {}
@@ -147,7 +149,7 @@ def _transform(self, sampled_parameters, parameters):
if par.default is not None:
value = par.default
else:
_logger.debug(f"{par.name} not found")
_logger.debug(f"parameter {par.name} not found in sampled_parameters")
continue

multivalue = False
@@ -206,7 +208,11 @@ def retrieve_output(self):
-------
dict with the results of a model run.
"""
warnings.warn("deprecated, use model.output instead")
# TODO: We should determine a version in which this method is removed and add that to the deprecation warning
warnings.warn(
"The 'retrieve_output' method is deprecated and will be removed in a future version. Use 'model.output' instead.",
DeprecationWarning,
)
return self.output

@method_logger(__name__)
@@ -284,7 +290,7 @@ def replications(self, replications):
self._replications = [MyDict(**entry) for entry in replications]
self.nreplications = len(replications)
else:
raise TypeError(f"replications should be int or list not {type(replications)}")
raise TypeError(f"Replications should be int or list, not {type(replications)}")

@method_logger(__name__)
def run_model(self, scenario, policy):
@@ -303,7 +309,7 @@ def run_model(self, scenario, policy):
partial_experiment = combine(scenario, self.policy, constants)

for i, rep in enumerate(self.replications):
_logger.debug(f"replication {i}")
_logger.debug(f"replication {i} of {self.nreplications}")
rep.id = i
experiment = ExperimentReplication(scenario, self.policy, constants, rep)
output = self.run_experiment(experiment)
@@ -376,7 +382,7 @@ def __init__(self, name, function=None):
super().__init__(name)

if not callable(function):
raise ValueError("function should be callable")
raise ValueError("Function should be callable")

self.function = function

@@ -400,7 +406,7 @@ def run_experiment(self, experiment):
try:
value = model_output[variable]
except KeyError:
_logger.warning(variable + " not found in model output")
_logger.warning(f"variable {variable} not found in model output")
value = None
except TypeError:
value = model_output[i]
@@ -423,7 +429,7 @@ def working_directory(self):
@working_directory.setter
def working_directory(self, path):
wd = os.path.abspath(path)
_logger.debug("setting working directory to " + wd)
_logger.debug(f"setting working directory to {wd}")
self._working_directory = wd

def __init__(self, name, wd=None):
@@ -446,7 +452,7 @@ def __init__(self, name, wd=None):
self.working_directory = wd

if not os.path.exists(self.working_directory):
raise ValueError(f"{self.working_directory} does not exist")
raise ValueError(f"Working directory {self.working_directory} does not exist")

def as_dict(self):
model_specs = super().as_dict()
@@ -462,7 +468,7 @@ def working_directory(self):
@working_directory.setter
def working_directory(self, path):
wd = os.path.abspath(path)
_logger.debug("setting working directory to " + wd)
_logger.debug(f"setting working directory to {wd}")
self._working_directory = wd

def __init__(self, name, wd=None, model_file=None):
@@ -501,12 +507,11 @@ def __init__(self, name, wd=None, model_file=None):

path_to_file = os.path.join(self.working_directory, model_file)
if not os.path.isfile(path_to_file):
raise ValueError("cannot find model file")
raise ValueError(f"Cannot find model file: {model_file},\nPath to file: {path_to_file}")

if os.getcwd() == self.working_directory:
raise ValueError(
"the working directory of the model cannot be "
"the same as the current working directory"
f"The working directory of the model cannot be the same as the current working directory\nBoth are: {self.working_directory}"
)

self.model_file = model_file
20 changes: 12 additions & 8 deletions ema_workbench/em_framework/optimization.py
@@ -56,7 +56,10 @@


except ImportError:
warnings.warn("platypus based optimization not available", ImportWarning)
warnings.warn(
f"Platypus based optimization not available. Install with `pip install platypus-opt`",
ImportWarning,
)

class PlatypusProblem:
constraints = []
@@ -180,7 +183,7 @@ def to_problem(model, searchover, reference=None, constraints=None):
outcome_names = [outcome.name for outcome in outcomes]

if not outcomes:
raise EMAError("no outcomes specified to optimize over, " "all outcomes are of kind=INFO")
raise EMAError("No outcomes specified to optimize over, all outcomes are of kind=INFO")

problem = Problem(
searchover, decision_variables, outcome_names, constraints, reference=reference
@@ -216,7 +219,7 @@ def to_robust_problem(model, scenarios, robustness_functions, constraints=None):
outcome_names = [outcome.name for outcome in outcomes]

if not outcomes:
raise EMAError("no outcomes specified to optimize over, " "all outcomes are of kind=INFO")
raise EMAError("No outcomes specified to optimize over, all outcomes are of kind=INFO")

problem = RobustProblem(
decision_variables, outcome_names, scenarios, robustness_functions, constraints
@@ -640,9 +643,10 @@ class HyperVolume(AbstractConvergenceMetric):

def __init__(self, minimum, maximum):
super().__init__("hypervolume")
# TODO: Add version in which we're going to remove HyperVolume to the deprecation warning
warnings.warn(
"HyperVolume is deprecated, use ArchiveLogger and HypervolumeMetric instead",
warnings.DeprecationWarning,
DeprecationWarning,
)
self.hypervolume_func = Hypervolume(minimum=minimum, maximum=maximum)

@@ -774,7 +778,7 @@ def epsilon_nondominated(results, epsilons, problem):
"""
if problem.nobjs != len(epsilons):
ValueError(
f"the number of epsilon values ({len(epsilons)}) must match the number of objectives {problem.nobjs}"
f"The number of epsilon values ({len(epsilons)}) must match the number of objectives {problem.nobjs}"
)

results = pd.concat(results, ignore_index=True)
@@ -891,13 +895,13 @@ def rebuild_platypus_population(archive, problem):
missing_parameters = [
attr for attr in problem.parameter_names if not hasattr(row, attr)
]
raise EMAError(f"parameter names {missing_parameters} not found in archive")
raise EMAError(f"Parameter names {missing_parameters} not found in archive")

try:
objectives = [getattr(row, attr) for attr in problem.outcome_names]
except AttributeError:
missing_outcomes = [attr for attr in problem.outcome_names if not hasattr(row, attr)]
raise EMAError(f"outcome names {missing_outcomes} not found in archive'")
raise EMAError(f"Outcome names {missing_outcomes} not found in archive'")

solution = Solution(problem)
solution.variables = [
@@ -1071,7 +1075,7 @@ def _optimize(
pass
else:
if len(eps_values) != len(problem.outcome_names):
raise EMAError("number of epsilon values does not match number " "of outcomes")
raise EMAError("Number of epsilon values does not match number of outcomes")

if variator is None:
if all(isinstance(t, klass) for t in problem.types):
24 changes: 13 additions & 11 deletions ema_workbench/em_framework/outcomes.py
@@ -182,15 +182,13 @@ def process(self, values):
return self.function(values)
elif n_variables != n_values:
raise ValueError(
("number of variables is {}, " "number of outputs is {}").format(
n_variables, n_values
)
f"Number of variables is {n_variables}, number of outputs is {n_values}. They should be equal."
)
else:
return self.function(*values)
else:
if len(values) > 1:
raise EMAError("more than one value returned without " "processing function")
raise EMAError("More than one value returned without processing function")

return values[0]

@@ -304,7 +302,7 @@ class ScalarOutcome(AbstractOutcome):
@property
def expected_range(self):
if self._expected_range is None:
raise ValueError(f"no expected_range is set for {self.variable_name}")
raise ValueError(f"No expected_range is set for variable {self.variable_name}")
return self._expected_range

@expected_range.setter
@@ -332,7 +330,7 @@ def process(self, values):
values = super().process(values)
if not isinstance(values, numbers.Number):
raise EMAError(
f"outcome {self.name} should be a scalar, but is" f" {type(values)}: {values}"
f"Outcome {self.name} should be a scalar, but is a {type(values)}: {values}"
)
return values

@@ -420,7 +418,9 @@ def __init__(
def process(self, values):
values = super().process(values)
if not isinstance(values, collections.abc.Iterable):
raise EMAError(f"outcome {self.name} should be a collection")
raise EMAError(
f"Outcome {self.name} should be a collection, but is a {type(values)}: {values}"
)
return values

@classmethod
@@ -462,7 +462,7 @@ def from_disk(cls, filename, archive):
array_file.seek(0)
return np.load(array_file)
else:
raise EMAError("unknown file extension")
raise EMAError(f"Unknown file extension {filename}. Only .csv and .npy are supported.")


class TimeSeriesOutcome(ArrayOutcome):
@@ -549,7 +549,7 @@ def from_disk(cls, filename, archive):
if filename.endswith("csv"):
return pd.read_csv(f, index_col=False, header=0).values
else:
raise EMAError("unknown file extension")
raise EMAError(f"Unknown file extension {filename}. Only .csv and .npy are supported.")


class Constraint(ScalarOutcome):
@@ -626,7 +626,7 @@ def create_outcomes(outcomes, **kwargs):

for entry in ["name", "type"]:
if entry not in outcomes.columns:
raise ValueError(f"no {entry} column in dataframe")
raise ValueError(f"No {entry} column in outcome DataFrame")

temp_outcomes = []
for _, row in outcomes.iterrows():
@@ -638,6 +638,8 @@ def create_outcomes(outcomes, **kwargs):
elif kind == "timeseries":
outcome = TimeSeriesOutcome(name)
else:
raise ValueError("unknown type for " + name)
raise ValueError(
f"Unknown type for outcome {name}: {kind}. Should be 'scalar' or 'timeseries'"
)
temp_outcomes.append(outcome)
return temp_outcomes
3 changes: 2 additions & 1 deletion ema_workbench/em_framework/outputspace_exploration.py
@@ -158,7 +158,8 @@ def __init__(
):
if problem.nobjs != len(grid_spec):
raise EMAError(
"the number of items in grid_spec does not match the number of objectives"
f"The number of items in grid_spec ({len(grid_spec)})"
f"does not match the number of objectives ({problem.nobjs})."
)
super().__init__(problem, population_size, generator=generator, **kwargs)
self.archive = HitBox(grid_spec)