From d377c6dfecbacb1535efee8babd16a099bf8fa88 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 31 Mar 2021 17:07:27 +0200 Subject: [PATCH 01/29] Add 8 .util methods from message_data broadcast, copy_column, ffill, make_io, make_matched_dfs, make_source_tech, merge_data, same_node --- doc/api/util.rst | 8 ++ message_ix_models/tests/test_util.py | 76 ++++++++++ message_ix_models/util/__init__.py | 206 ++++++++++++++++++++++++++- 3 files changed, 289 insertions(+), 1 deletion(-) diff --git a/doc/api/util.rst b/doc/api/util.rst index f84f747790..e7206b775f 100644 --- a/doc/api/util.rst +++ b/doc/api/util.rst @@ -19,10 +19,18 @@ Commonly used: .. autosummary:: as_codes + broadcast + copy_column + ffill load_package_data load_private_data + make_io + make_matched_dfs + make_source_tech + merge_data package_data_path private_data_path + same_node ~context.Context ~scenarioinfo.ScenarioInfo diff --git a/message_ix_models/tests/test_util.py b/message_ix_models/tests/test_util.py index 426b54a03c..19cb875d50 100644 --- a/message_ix_models/tests/test_util.py +++ b/message_ix_models/tests/test_util.py @@ -2,15 +2,22 @@ import logging from pathlib import Path +import pandas as pd import pytest +from message_ix import make_df +from message_ix_models import ScenarioInfo from message_ix_models.util import ( MESSAGE_DATA_PATH, MESSAGE_MODELS_PATH, as_codes, + broadcast, + copy_column, + ffill, iter_parameters, load_package_data, load_private_data, + make_source_tech, package_data_path, private_data_path, ) @@ -42,6 +49,37 @@ def test_as_codes_invalid(data): as_codes(data) +def test_copy_column(): + df = pd.DataFrame([[0, 1], [2, 3]], columns=["a", "b"]) + df = df.assign(c=copy_column("a"), d=4) + assert all(df["c"] == [0, 2]) + assert all(df["d"] == 4) + + +def test_ffill(): + years = list(range(6)) + + df = ( + make_df( + "fix_cost", + year_act=[0, 2, 4], + year_vtg=[0, 2, 4], + technology=["foo", "bar", "baz"], + unit="USD", + ) + .pipe(broadcast, node_loc=["A", "B", "C"]) + .assign(value=list(map(float, range(9)))) + ) + + # Function completes + result = ffill(df, "year_vtg", years, "year_act = year_vtg") + + assert 2 * len(df) == len(result) + assert years == sorted(result["year_vtg"].unique()) + + # TODO test some specific values + + def test_iter_parameters(test_context): """Parameters indexed by set 'node' can be retrieved.""" result = list(iter_parameters("node")) @@ -79,6 +117,44 @@ def test_load_private_data(*parts, suffix=None): load_private_data("sources.yaml") +def test_make_source_tech(): + info = ScenarioInfo() + info.set["node"] = ["World", "node0", "node1"] + info.set["year"] = [1, 2, 3] + + values = dict( + capacity_factor=1.0, + output=2.0, + var_cost=3.0, + technical_lifetime=4.0, + ) + result = make_source_tech( + info, + common=dict( + commodity="commodity", + level="level", + mode="mode", + technology="technology", + time="time", + time_dest="time", + unit="unit", + ), + **values, + ) + # Result is dictionary with the expected keys + assert isinstance(result, dict) + assert set(result.keys()) == set(values.keys()) + + # "World" node does not appear in results + assert set(result["output"]["node_loc"].unique()) == set(info.N[1:]) + + for df in result.values(): + # Results have 2 nodes × 3 years + assert len(df) == 2 * 3 + # No empty values + assert not df.isna().any(None) + + def test_package_data_path(*parts, suffix=None): assert MESSAGE_MODELS_PATH.joinpath("data", "foo", "bar") == package_data_path( "foo", "bar" diff --git a/message_ix_models/util/__init__.py 
b/message_ix_models/util/__init__.py index 265d618a29..4314d4006a 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -1,7 +1,8 @@ import logging +from collections import defaultdict from copy import copy from pathlib import Path -from typing import Any, Dict, List, Mapping, Optional, Union, cast +from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast import message_ix import pandas as pd @@ -129,6 +130,38 @@ def as_codes(data: Union[List[str], Dict[str, Dict]]) -> List[Code]: return list(result.values()) +def broadcast(df, **kwargs): + """Fill missing data in `df` by broadcasting. + + Arguments + --------- + kwargs + Keys are dimensions. Values are labels along that dimension to fill. + """ + for dim, levels in kwargs.items(): + assert df[dim].isna().all(), ("Dimension {dim} was not empty", df.head()) + + df = ( + pd.concat({level: df for level in levels}, names=[dim]) + .drop(dim, axis=1) + .reset_index(dim) + ) + return df + + +def copy_column(column_name): + """For use with :meth:`pandas.DataFrame.assign`. + + Examples + -------- + Modify `df` by filling the column 'baz' with the value ``3``, and copying + the column 'bar' into column 'foo'. + + >>> df.assign(foo=copy_column('bar'), baz=3) + """ + return lambda df: df[column_name] + + def eval_anno(obj: AnnotableArtefact, id: str): """Retrieve the annotation `id` from `obj`, run :func:`eval` on its contents. @@ -150,6 +183,48 @@ def eval_anno(obj: AnnotableArtefact, id: str): return value +def ffill( + df: pd.DataFrame, dim: str, values: Sequence[Union[str, Code]], expr: str = None +) -> pd.DataFrame: + """Forward-fill `df` on `dim` to cover `values`. + + Parameters + ---------- + df : .DataFrame + Data to fill forwards. + dim : str + Dimension to fill along. Must be a column in `df`. + labels : list of str + Labels along `dim` that must be present in the returned data frame. + expr : str, optional + If provided, :meth:`.DataFrame.eval` is called. This can be used to assign one + column to another. For instance, if `dim` == "year_vtg" and `expr` is "year_act + = year_vtg", then forward filling is performed along the "year_vtg" dimension/ + column, and then the filled values are copied to the "year_act" column. + """ + if dim in ("value", "unit"): + raise ValueError(dim) + + # Mapping from (values existing in `df`) -> equal or greater members of `values` + mapping = defaultdict(set) + last_seen = None + for v in sorted(set(values) | set(df[dim].unique())): + if v in df[dim].unique(): + last_seen = v + mapping[last_seen].add(v) + + def _maybe_eval(df): + return df.eval(expr) if expr is not None else df + + dfs = [df] + for key, group_df in df.groupby(dim): + for new_label in sorted(mapping[key])[1:]: + # Duplicate the data; assign the new_label to `dim` + dfs.append(group_df.assign(**{dim: new_label}).pipe(_maybe_eval)) + + return pd.concat(dfs, ignore_index=True) + + def iter_parameters(set_name): """Iterate over MESSAGEix parameters with *set_name* as a dimension. @@ -261,6 +336,129 @@ def load_private_data(*parts: str) -> Mapping: # pragma: no cover (needs messag return _load(PRIVATE_DATA, MESSAGE_DATA_PATH / "data", *parts) +def make_io(src, dest, efficiency, on="input", **kwargs): + """Return input and output data frames for a 1-to-1 technology. + + Parameters + ---------- + src : tuple (str, str, str) + Input commodity, level, unit. + dest : tuple (str, str, str) + Output commodity, level, unit. + efficiency : float + Conversion efficiency. 
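An illustrative sketch of the :func:`broadcast`, :func:`copy_column` and :func:`ffill` helpers added above, following the same pattern as the new tests in :file:`test_util.py`; the node and technology labels are placeholders, and :mod:`message_ix` is assumed to be installed:

.. code-block:: python

    from message_ix import make_df
    from message_ix_models.util import broadcast, copy_column, ffill

    # A "fix_cost" frame with an empty node_loc dimension, filled by broadcast()
    df = make_df(
        "fix_cost",
        technology="t",
        year_vtg=[2020, 2025],
        year_act=[2020, 2025],
        value=1.0,
        unit="USD",
    ).pipe(broadcast, node_loc=["R14_AFR", "R14_WEU"])

    # copy_column() returns a callable for DataFrame.assign(); here it keeps
    # year_act equal to year_vtg
    df = df.assign(year_act=copy_column("year_vtg"))

    # Forward-fill along year_vtg so that 2030 is also covered; `expr` copies
    # the filled labels into year_act as well
    result = ffill(df, "year_vtg", [2020, 2025, 2030], expr="year_act = year_vtg")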
+ on : 'input' or 'output' + If 'input', `efficiency` applies to the input, and the output, thus the + activity level of the technology, is in dest[2] units. If 'output', + the opposite. + kwargs + Passed to :func:`make_df`. + + Returns + ------- + dict (str -> pd.DataFrame) + Keys are 'input' and 'output'; values are data frames. + """ + return dict( + input=message_ix.make_df( + "input", + commodity=src[0], + level=src[1], + unit=src[2], + value=efficiency if on == "input" else 1.0, + **kwargs, + ), + output=message_ix.make_df( + "output", + commodity=dest[0], + level=dest[1], + unit=dest[2], + value=1.0 if on == "input" else efficiency, + **kwargs, + ), + ) + + +def make_matched_dfs(base, **par_value): + """Return data frames derived from *base* for multiple parameters. + + *par_values* maps from parameter names (e.g. 'fix_cost') to values. + make_matched_dfs returns a :class:`dict` of :class:`pandas.DataFrame`, one + for each parameter in *par_value*. The contents of *base* are used to + populate the columns of each data frame, and the values of *par_value* + overwrite the 'value' column. Duplicates—which occur when the target + parameter has fewer dimensions than *base*—are dropped. + + Examples + -------- + >>> input = make_df('input', ...) + >>> cf_tl = make_matched_dfs( + >>> input, + >>> capacity_factor=1, + >>> technical_lifetime=1, + >>> ) + """ + data = {col: v for col, v in base.iteritems() if col != "value"} + return { + par: message_ix.make_df(par, **data, value=value).drop_duplicates() + for par, value in par_value.items() + } + + +def make_source_tech(info, common, **values) -> Mapping[str, pd.DataFrame]: + """Return parameter data for a ‘source’ technology. + + The technology has no inputs; its output commodity and/or level are + determined by `common`; either single values, or :obj:`None` if the + result will be :meth:`~DataFrame.pipe`'d through :func:`broadcast`. + + Parameters + ---------- + info : ScenarioInfo + common : dict + Passed to :func:`make_df`. + **values + Values for 'capacity_factor' (optional; default 1.0), 'output', + 'technical_lifetime', and 'var_cost'. + + Returns + ------- + dict + Suitable for :func:`add_par_data`. 
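A sketch of :func:`make_io` and :func:`make_matched_dfs` used together; the commodity, technology and node IDs are placeholders, and—as in the other uses of these helpers in this series—keys in ``common`` that are not dimensions of a given parameter are simply dropped by :func:`make_df`:

.. code-block:: python

    from message_ix_models.util import make_io, make_matched_dfs

    common = dict(
        node_loc="R14_AFR", node_origin="R14_AFR", node_dest="R14_AFR",
        technology="coal_ppl", mode="all",
        year_vtg=2020, year_act=2020,
        time="year", time_origin="year", time_dest="year",
    )

    # 'input' and 'output' frames for a 1-to-1 conversion technology; with
    # on="input", the efficiency is applied to the input coefficient
    data = make_io(
        src=("coal", "primary", "GWa"),
        dest=("electr", "secondary", "GWa"),
        efficiency=2.5,  # 2.5 units of coal per unit of electricity produced
        on="input",
        **common,
    )

    # Frames for lower-dimensional parameters, derived from the output frame
    data.update(
        make_matched_dfs(
            base=data["output"], capacity_factor=0.9, technical_lifetime=30
        )
    )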
+ """ + # Check arguments + values.setdefault("capacity_factor", 1.0) + missing = {"capacity_factor", "output", "technical_lifetime", "var_cost"} - set( + values.keys() + ) + if len(missing): + raise ValueError(f"make_dummy_source() needs values for {repr(missing)}") + + # Create data for "output" + output = ( + message_ix.make_df( + "output", + value=values.pop("output"), + year_act=info.Y, + year_vtg=info.Y, + **common, + ) + .pipe(broadcast, node_loc=info.N[1:]) + .pipe(same_node) + ) + result = make_matched_dfs(base=output, **values) + result["output"] = output + + return result + + +def merge_data(base, *others): + """Merge dictionaries of DataFrames together into `base`.""" + for other in others: + for par, df in other.items(): + base[par] = base[par].append(df) if par in base else df + + def package_data_path(*parts) -> Path: """Construct a path to a file under :file:`message_ix_models/data/`.""" return _make_path(MESSAGE_MODELS_PATH / "data", *parts) @@ -271,6 +469,12 @@ def private_data_path(*parts) -> Path: # pragma: no cover (needs message_data) return _make_path(cast(Path, MESSAGE_DATA_PATH) / "data", *parts) +def same_node(df): + """Fill 'node_origin'/'node_dest' in `df` from 'node_loc'.""" + cols = list(set(df.columns) & {"node_origin", "node_dest"}) + return df.assign(**{c: copy_column("node_loc") for c in cols}) + + def strip_par_data( scenario, set_name, value, dry_run=False, dump: Dict[str, pd.DataFrame] = None ): From 9ceb98828a353961cfbac03f7fc941e8beca01c9 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 31 Mar 2021 17:08:03 +0200 Subject: [PATCH 02/29] Tolerate missing parameters in .util.strip_par_data() --- message_ix_models/util/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index 4314d4006a..a7927e8f8a 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -495,10 +495,11 @@ def strip_par_data( # Iterate over parameters with ≥1 dimensions indexed by `set_name` for par_name in iter_parameters(set_name): if par_name not in par_list: - raise RuntimeError( # pragma: no cover + log.warning( # pragma: no cover f"MESSAGEix parameter {repr(par_name)} missing in Scenario " f"{scenario.model}/{scenario.scenario}" ) + continue # Iterate over dimensions indexed by `set_name` for dim, _ in filter( From 09a36d6f41a06f8bf93a235a75ec9c9491fadc4f Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 31 Mar 2021 17:09:39 +0200 Subject: [PATCH 03/29] =?UTF-8?q?Allow=20testing.bare=5Fres(None,=20?= =?UTF-8?q?=E2=80=A6)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- message_ix_models/testing.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/message_ix_models/testing.py b/message_ix_models/testing.py index dc47615c53..0a36570c0c 100644 --- a/message_ix_models/testing.py +++ b/message_ix_models/testing.py @@ -157,8 +157,9 @@ def bare_res(request, context: Context, solved: bool = False) -> message_ix.Scen Parameters ---------- - request : .Request - The pytest :fixture:`pytest:request` fixture. + request : .Request or None + The pytest :fixture:`pytest:request` fixture. If provided the pytest test node + name is used for the scenario name of the returned Scenario. context : .Context Passed to :func:`.testing.bare_res`. 
solved : bool, optional @@ -188,5 +189,10 @@ def bare_res(request, context: Context, solved: bool = False) -> message_ix.Scen log.info("Solve") base.solve(solve_options=dict(lpmethod=4), quiet=True) - log.info(f"Clone to '{name}/{request.node.name}'") - return base.clone(scenario=request.node.name, keep_solution=solved) + try: + new_name = request.node.name + except AttributeError: + new_name = "baseline" + + log.info(f"Clone to '{name}/{new_name}'") + return base.clone(scenario=new_name, keep_solution=solved) From e0bb48d5e216050ab002f8a1527d2068f1b487a8 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 31 Mar 2021 17:10:06 +0200 Subject: [PATCH 04/29] Add .model.disutility from message_data; docs --- doc/api/disutility.rst | 123 ++++++++++++++ doc/index.rst | 1 + message_ix_models/model/disutility.py | 229 ++++++++++++++++++++++++++ 3 files changed, 353 insertions(+) create mode 100644 doc/api/disutility.rst create mode 100644 message_ix_models/model/disutility.py diff --git a/doc/api/disutility.rst b/doc/api/disutility.rst new file mode 100644 index 0000000000..1ab8d663f9 --- /dev/null +++ b/doc/api/disutility.rst @@ -0,0 +1,123 @@ +.. currentmodule:: message_data.model.disutility + +Consumer disutility +******************* + +This module provides a generalized consumer disutility formulation, currently used by :mod:`message_data.model.transport`. + +The formulation rests on the concept of “consumer groups.” +Each consumer group may have a distinct disutility for using the outputs of each technology. + + +Method & usage +============== + +Use this code by calling :func:`add`, which takes arguments that describe the concrete usage: + +Consumer groups + This is a list of :class:`.Code` objects describing the consumer groups. + The list must be 1-dimensional, but can be composed (as in :mod:`message_data.model.transport`) from multiple dimensions. + +Technologies + This is a list of :class:`.Code` objects describing the technologies for which the consumers in the different groups experience disutility. + Each object must be have 'input' and 'output' annotations (:attr:`.Code.anno`); each of these is a :class:`dict` with the keys 'commodity', 'input', and 'unit', describing the source or sink for the technology. + +Template + This is also a :class:`.Code` object, similar to those in ``technologies``; see below. + + +The code does *not* do the following steps needed to completely parametrize the formulation: + +- Set consumer group-specific 'demand' parameter values for new commodities. +- Create a source technology for the “disutility” commodity. + + +Detailed example +================ + +From :func:`.transport.build.main`: + +.. 
code-block:: python + + # Add generalized disutility formulation to LDV technologies + disutility.add( + scenario, + + # Generate a list of consumer groups + consumer_groups=consumer_groups(), + + # Generate a list of technologies + technologies=generate_set_elements("technology", "LDV"), + + template=Code( + # Template for IDs of conversion technologies + id="transport {technology} usage", + + # Templates for inputs of conversion technologies + input=dict( + # Technology-specific output commodity + commodity="transport vehicle {technology}", + level="useful", + unit="km", + ), + + # Templates for outputs of conversion technologies + output=dict( + # Consumer-group–specific demand commodity + commodity="transport pax {mode}", + level="useful", + unit="km", + ), + ), + **options, + ) + + +:func:`add` uses :func:`get_spec` to generate a specification that adds the following: + +- A single 'commodity' set element, “disutility”. + +- 1 'mode' set element per element in ``consumer_groups``. + + **Example:** the function :func:`.consumer_groups` returns codes like “RUEAA”, “URLMF”, etc.; one 'mode' is created for each such group. + +- 1 'commodity' set element per technology in ``technologies``. + ``template.anno["input"]["commodity"]`` is used to generate the IDs of these commodities. + + **Example:** “transport vehicle {technology}” is used to generate a commodity “transport vehicles ELC_100” associated with the technology with the ID “ELC_100”. + +- 1 'commodity' set element per consumer group. + ``template.anno["output"]["commodity"]`` is used to generate the IDs of these commodities. + + **Example:** “transport pax {mode}” is used with to generate a commodity “transport pax RUEAA” is associated with the consumer group with ID “RUEAA”. + +- 1 additional 'technology' set element per disutility-affected technology. + ``template.id`` is used to generate the IDs of these technologies. + + **Example:** “transport {technology} usage}” is used to generate “transport ELC_100 usage” associated with the existing technology “ELC_100”. + + +The spec is applied to the target scenario using :func:`.model.build.apply_spec`. +If the arguments produce a spec that is inconsistent with the target scenario, an exception will by raised at this point. + + +Next, :func:`add` uses :func:`disutility_conversion` to generate data for the 'input' and 'output' parameters, as follows: + +- Existing, disutility-affected technologies (those listed in the ``technologies`` argument) 'output' to technology-specific commodities. + + **Example:** the technology “ELC_100” outputs to the commodity “transport vehicle ELC_100”, instead of to a common/pooled commodity such as “transport vehicle”. + +- New, conversion technologies have one 'mode' per consumer group. + + **Example:** the new technology “transport ELC_100 usage” + + - …in “all” modes—takes the *same* quantity of input from the *technology-specific* commodity “transport ELC_100 vehicle”. + - …in each consumer-group specific mode e.g. “RUEAA”—takes a *group-specific* quantity of input from the common commodity “disutility”. + - …in each consumer-group specific mode e.g. “RUEAA”—outputs to a *group-specific* commodity, e.g. “transport pax RUEAA”. + + +Code reference +============== + +.. 
automodule:: message_ix_models.model.disutility + :members: diff --git a/doc/index.rst b/doc/index.rst index 830c41d083..3e64193ab0 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -23,6 +23,7 @@ These models are built in the `MESSAGEix framework ` api/model api/model-bare api/model-build + api/disutility api/project api/tools api/util diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py new file mode 100644 index 0000000000..9fff6a1ef4 --- /dev/null +++ b/message_ix_models/model/disutility.py @@ -0,0 +1,229 @@ +from collections import defaultdict +from functools import lru_cache, partial +from typing import Mapping +import logging + +import pandas as pd +from sdmx.model import Annotation, Code +from message_ix_models import ScenarioInfo +from message_ix_models.model.build import apply_spec +from message_ix_models.util import ( + broadcast, + eval_anno, + make_io, + make_matched_dfs, + make_source_tech, + merge_data, + same_node, +) + + +log = logging.getLogger(__name__) + + +def add(scenario, consumer_groups, technologies, template, **options): + """Add disutility formulation to `scenario`.""" + # Generate the spec given the configuration options + spec = get_spec(scenario, consumer_groups, technologies, template) + + # Apply spec and add data + apply_spec(scenario, spec, partial(get_data, spec=spec), **options) + + +def get_spec(scenario, consumer_groups, technologies, template): + """Get a spec for a disutility formulation.""" + require = ScenarioInfo() + remove = ScenarioInfo() + add = ScenarioInfo() + + require.set["technology"] = technologies + + # Disutility commodity and source + add.set["commodity"] = [Code(id="disutility")] + add.set["technology"] = [Code(id="disutility source")] + + # Add consumer groups + for cg in consumer_groups: + add.set["mode"].append(Code(id=cg.id, name=f"Production for {cg.id}")) + + # Add conversion technologies + for t in technologies: + # String formatting arguments + fmt = dict(technology=t) + + # - Format the ID string from the template + # - Copy the "output" annotation without modification + t_code = Code( + id=template.id.format(**fmt), + annotations=[template.get_annotation(id="output")], + ) + + # Format each field in the "input" annotation + input = eval(str(template.get_annotation(id="input").text)) + t_code.annotations.append( + Annotation( + id="input", text=repr({k: v.format(**fmt) for k, v in input.items()}) + ) + ) + + add.set["technology"].append(t_code) + + return dict(require=require, remove=remove, add=add) + + +def get_data(scenario, spec, **kwargs) -> Mapping[str, pd.DataFrame]: + """Get data for disutility formulation. + + Calls :meth:`data_conversion` and :meth:`data_source`. + + Parameters + ---------- + spec : dict + The output of :meth:`get_spec`. 
+ """ + if len(kwargs): + log.warning(f"Ignore {repr(kwargs)}") + + info = ScenarioInfo(scenario) + + # Get conversion technology data + data = data_conversion(info, spec) + + # Get and append source data + merge_data(data, data_source(info, spec)) + + return data + + +def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: + """Input and output data for disutility conversion technologies.""" + common = dict( + year_vtg=info.Y, + year_act=info.Y, + # No subannual detail + time="year", + time_origin="year", + time_dest="year", + ) + + # Use the spec to retrieve information + technology = spec["add"].set["technology"] + mode = list(map(str, spec["add"].set["mode"])) + + # Data to return + data = defaultdict(list) + + # Loop over technologies + for t in technology: + # Use the annotations on the technology Code to get information about the + # commodity, level, and unit + input = eval_anno(t, "input") + output = eval_anno(t, "output") + if input is output is None: + if t.id == "disutility source": + continue # Data for this tech is from disutility_source() + else: + raise ValueError(t) # Error in user input + + # Helper functions for output + @lru_cache() + def oc_for_mode(mode): + # Format the output commodity id given the mode id + return output["commodity"].format(mode=mode) + + def output_commodity(df): + # Return a series with output commodity based on mode + return df["mode"].apply(oc_for_mode) + + # Make input and output data frames + i_o = make_io( + (input["commodity"], input["level"], input["unit"]), + (None, output["level"], output["unit"]), + 1.0, + on="output", + technology=t.id, + **common, + ) + for par, df in i_o.items(): + # Broadcast across nodes + df = df.pipe(broadcast, node_loc=info.N[1:]).pipe(same_node) + if par == "input": + # Common across modes + data[par].append(df.assign(mode="all")) + + # Disutility inputs differ by mode + data[par].append( + df.assign(commodity="disutility").pipe(broadcast, mode=mode) + ) + elif par == "output": + # - Broadcast across modes + # - Use a function to set the output commodity based on the + # mode + data[par].append( + df.pipe(broadcast, mode=mode).assign(commodity=output_commodity) + ) + + # Concatenate to a single data frame per parameter + data = {par: pd.concat(dfs) for par, dfs in data.items()} + + # Create data for capacity_factor and technical_lifetime + data.update( + make_matched_dfs( + base=data["input"], + capacity_factor=1, + # TODO get this from ScenarioInfo + technical_lifetime=10, + # commented: activity constraints for the technologies + # TODO get these values from an argument + growth_activity_lo=-0.5, + # growth_activity_up=0.5, + # initial_activity_up=1., + # soft_activity_lo=-0.5, + # soft_activity_up=0.5, + ) + ) + # Remove growth_activity_lo for first year + data["growth_activity_lo"] = data["growth_activity_lo"].query( + f"year_act > {spec['add'].y0}" + ) + + # commented: initial activity constraints for the technologies + # data.update( + # make_matched_dfs(base=data["output"], initial_activity_up=2.) 
+ # ) + + return data + + +def data_source(info, spec) -> Mapping[str, pd.DataFrame]: + """Generate data for a technology that emits the disutility commodity.""" + # List of input levels where disutility commodity must exist + levels = set() + for t in spec["add"].set["technology"]: + input = eval_anno(t, "input") + if input: + levels.add(input["level"]) + else: + # "disutility source" technology has no annotations + continue + + log.info(f"Generate disutility on level(s): {repr(levels)}") + + result = make_source_tech( + info, + common=dict( + commodity="disutility", + mode="all", + technology="disutility source", + time="year", + time_dest="year", + unit="-", + ), + output=1.0, + var_cost=1.0, + # TODO get this from ScenarioInfo + technical_lifetime=10, + ) + result["output"] = result["output"].pipe(broadcast, level=sorted(levels)) + + return result From b7bad56ef6132b7eb9dbeef117985ea7cd06efa9 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 13:45:20 +0200 Subject: [PATCH 05/29] Remove unused `scenario` arg to .disutility.get_spec(); annotate types --- message_ix_models/model/disutility.py | 34 +++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 9fff6a1ef4..762794324e 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -1,10 +1,12 @@ from collections import defaultdict from functools import lru_cache, partial -from typing import Mapping +from typing import Dict, Mapping, Sequence, Union import logging +import message_ix import pandas as pd from sdmx.model import Annotation, Code + from message_ix_models import ScenarioInfo from message_ix_models.model.build import apply_spec from message_ix_models.util import ( @@ -20,18 +22,40 @@ log = logging.getLogger(__name__) +CodeLike = Union[str, Code] + -def add(scenario, consumer_groups, technologies, template, **options): +def add( + scenario: message_ix.Scenario, + groups: Sequence[CodeLike], + technologies: Sequence[CodeLike], + template: Code, + **options, +) -> None: """Add disutility formulation to `scenario`.""" # Generate the spec given the configuration options - spec = get_spec(scenario, consumer_groups, technologies, template) + spec = get_spec(groups, technologies, template) # Apply spec and add data apply_spec(scenario, spec, partial(get_data, spec=spec), **options) -def get_spec(scenario, consumer_groups, technologies, template): - """Get a spec for a disutility formulation.""" +def get_spec( + groups: Sequence[CodeLike], + technologies: Sequence[CodeLike], + template: Code, +) -> Dict[str, ScenarioInfo]: + """Get a spec for a disutility formulation. + + Parameters + ---------- + groups : list of Code + Identities of the consumer groups with distinct disutilities. + technologies : list of Code + The technologies to which the disutilities are applied. 
+ template : .Code + + """ require = ScenarioInfo() remove = ScenarioInfo() add = ScenarioInfo() From 17922e57c3069ae109e49e9a878e0fc5180fc8c4 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 13:46:38 +0200 Subject: [PATCH 06/29] Include input and output commodities in .disutility.get_spec() --- message_ix_models/model/disutility.py | 28 ++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 762794324e..0a8425cd07 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -67,31 +67,41 @@ def get_spec( add.set["technology"] = [Code(id="disutility source")] # Add consumer groups - for cg in consumer_groups: - add.set["mode"].append(Code(id=cg.id, name=f"Production for {cg.id}")) + for g in groups: + add.set["mode"].append(Code(id=g.id, name=f"Production for {g.id}")) # Add conversion technologies for t in technologies: # String formatting arguments fmt = dict(technology=t) + # Format each field in the "input" and "output" annotations + input = {k: v.format(**fmt) for k, v in eval_anno(template, id="input").items()} + output = eval_anno(template, id="output") + # - Format the ID string from the template # - Copy the "output" annotation without modification t_code = Code( id=template.id.format(**fmt), - annotations=[template.get_annotation(id="output")], + annotations=[ + template.get_annotation(id="output"), + Annotation(id="input", text=repr(input)), + ], ) - # Format each field in the "input" annotation - input = eval(str(template.get_annotation(id="input").text)) - t_code.annotations.append( - Annotation( - id="input", text=repr({k: v.format(**fmt) for k, v in input.items()}) - ) + # "commodity" set elements to add + add.set["commodity"].append(input["commodity"]) + add.set["commodity"].extend( + output["commodity"].format(mode=g.id) for g in groups ) + # "technology" set elements to add + t_code.annotations.append(Annotation(id="input", text=repr(input))) add.set["technology"].append(t_code) + # Deduplicate "commodity" set elements + add.set["commodity"] = sorted(set(add.set["commodity"])) + return dict(require=require, remove=remove, add=add) From bfcd01bf051f04021130bf5fd6abd29bf80b8d20 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 13:47:06 +0200 Subject: [PATCH 07/29] Improve checks in .util.broadcast() --- message_ix_models/util/__init__.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index a7927e8f8a..5cdc08f964 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -139,7 +139,13 @@ def broadcast(df, **kwargs): Keys are dimensions. Values are labels along that dimension to fill. """ for dim, levels in kwargs.items(): - assert df[dim].isna().all(), ("Dimension {dim} was not empty", df.head()) + # Checks + assert df[dim].isna().all(), f"Dimension {dim} was not empty\n\n{df.head()}" + if len(levels) == 0: + log.debug( + f"Don't broadcast over {repr(dim)}; labels {levels} have length 0" + ) + continue df = ( pd.concat({level: df for level in levels}, names=[dim]) @@ -408,9 +414,9 @@ def make_matched_dfs(base, **par_value): def make_source_tech(info, common, **values) -> Mapping[str, pd.DataFrame]: """Return parameter data for a ‘source’ technology. 
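The disutility API, as revised by the preceding patches, can be exercised roughly as in the following sketch; the group, technology and commodity IDs are the toy values used by the new tests, not part of any real model:

.. code-block:: python

    from sdmx.model import Annotation, Code

    from message_ix_models.model import disutility

    groups = [Code(id="g0"), Code(id="g1")]
    techs = [Code(id="t0"), Code(id="t1")]

    # Template for the conversion technologies and their input/output
    template = Code(
        id="{technology} usage",
        annotations=[
            Annotation(
                id="input",
                text=repr(
                    dict(commodity="output of {technology}", level="useful", unit="kg")
                ),
            ),
            Annotation(
                id="output",
                text=repr(
                    dict(commodity="demand of group {mode}", level="useful", unit="kg")
                ),
            ),
        ],
    )

    spec = disutility.get_spec(groups, techs, template)

    # The spec adds the "disutility" commodity, a "disutility source" technology,
    # one "{technology} usage" technology per entry in `techs`, and one mode per
    # consumer group

Passing the same ``groups``, ``techs`` and ``template`` to :func:`disutility.add` together with an existing :class:`.Scenario` applies this spec and adds the corresponding parameter data.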
- The technology has no inputs; its output commodity and/or level are - determined by `common`; either single values, or :obj:`None` if the - result will be :meth:`~DataFrame.pipe`'d through :func:`broadcast`. + The technology has no inputs; its output commodity and/or level are determined by + `common`; either single values, or :obj:`None` if the result will be + :meth:`~DataFrame.pipe`'d through :func:`broadcast`. Parameters ---------- From b025e61a5aa6deca76ce21fe4b8099985bb8d235 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 13:55:01 +0200 Subject: [PATCH 08/29] Add tests of .model.disutility --- message_ix_models/model/disutility.py | 2 +- .../tests/model/test_disutility.py | 196 ++++++++++++++++++ 2 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 message_ix_models/tests/model/test_disutility.py diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 0a8425cd07..9cf2ea741f 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -100,7 +100,7 @@ def get_spec( add.set["technology"].append(t_code) # Deduplicate "commodity" set elements - add.set["commodity"] = sorted(set(add.set["commodity"])) + add.set["commodity"] = sorted(map(str, set(add.set["commodity"]))) return dict(require=require, remove=remove, add=add) diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py new file mode 100644 index 0000000000..6f404744bd --- /dev/null +++ b/message_ix_models/tests/model/test_disutility.py @@ -0,0 +1,196 @@ +import pandas as pd +import pytest +from message_ix import make_df +from sdmx.model import Annotation, Code + +from message_ix_models import ScenarioInfo, testing +from message_ix_models.model import disutility +from message_ix_models.util import ( + add_par_data, + copy_column, + make_source_tech, + merge_data, +) + +# Common data and fixtures for test_minimal() and other tests + +COMMON = dict( + level="useful", + node_dest="R14_AFR", + node_loc="R14_AFR", + node_origin="R14_AFR", + node="R14_AFR", + time_dest="year", + time_origin="year", + time="year", + unit="kg", +) + + +@pytest.fixture +def groups(): + """List of two consumer groups.""" + yield [Code(id="g0"), Code(id="g1")] + + +@pytest.fixture +def techs(): + """List of two technologies, for which groups may have different disutilities.""" + yield [Code(id="t0"), Code(id="t1")] + + +@pytest.fixture +def template(): + """:class:.`Code` object with annotations, for :func:`.disutility.get_spec`.""" + # Template for inputs of conversion technologies, from a technology-specific + # commodity + input = dict(commodity="output of {technology}", level="useful", unit="kg") + + # Template for outputs of conversion technologies, to a group–specific demand + # commodity + output = dict(commodity="demand of group {mode}", level="useful", unit="kg") + + # Code's ID is itself a template for IDs of conversion technologies + yield Code( + id="{technology} usage", + annotations=[ + Annotation(id="input", text=repr(input)), + Annotation(id="output", text=repr(output)), + ], + ) + + +@pytest.fixture +def spec(groups, techs, template): + """A prepared spec for the minimal test case.""" + yield disutility.get_spec(groups, techs, template) + + +@pytest.fixture +def scenario(request, test_context, techs): + """A :class:`.Scenario` with technologies given by :func:`techs`.""" + s = testing.bare_res(request, test_context, solved=False) + s.check_out() + + 
s.add_set("technology", ["t0", "t1"]) + + s.commit("Test fixture for .model.disutility") + yield s + + +def test_add(scenario, groups, techs, template): + """:func:`.disutility.add` runs on the bare RES; the result solves.""" + disutility.add(scenario, groups, techs, template) + + # Scenario solves (no demand) + scenario.solve(quiet=True) + assert (scenario.var("ACT")["lvl"] == 0).all() + + +def test_minimal(scenario, groups, techs, template): + """Minimal test case for disutility formulation.""" + disutility.add(scenario, groups, techs, template) + + # Fill in the data for the test case + + common = COMMON.copy() + common.pop("node_loc") + common.update(dict(mode="all")) + + data = dict() + + for t in ("t0", "t1"): + common.update(dict(technology=t, commodity=f"output of {t}")) + merge_data( + data, + make_source_tech( + ScenarioInfo(scenario), + common, + output=1.0, + technical_lifetime=5.0, + var_cost=0.0, + ), + ) + + # For each combination of (tech) × (group) × (2 years) + df = pd.DataFrame( + [ + ["g0", "output of t0", "t0 usage", 2020, 1.0], + ["g0", "output of t0", "t0 usage", 2025, 1.0], + ["g0", "output of t1", "t1 usage", 2020, 1.0], + ["g0", "output of t1", "t1 usage", 2025, 1.0], + ["g1", "output of t0", "t0 usage", 2020, 1.0], + ["g1", "output of t0", "t0 usage", 2025, 1.0], + ["g1", "output of t1", "t1 usage", 2020, 1.0], + ["g1", "output of t1", "t1 usage", 2025, 1.0], + ], + columns=["mode", "commodity", "technology", "year_vtg", "value"], + ) + data["input"] = make_df("input", **df, **COMMON).assign( + node_origin=copy_column("node_loc"), year_act=copy_column("year_vtg") + ) + + data["demand"] = make_df( + "demand", + **pd.DataFrame( + [ + ["demand of group g0", 2020, 1.0], + ["demand of group g0", 2025, 1.0], + ["demand of group g1", 2020, 1.0], + ["demand of group g1", 2025, 1.0], + ], + columns=["commodity", "year", "value"], + ), + **COMMON, + ) + + scenario.check_out() + add_par_data(scenario, data) + scenario.commit("Disutility test 1") + + scenario.solve(quiet=True) + + ACT = scenario.var("ACT").query("lvl > 0").drop(columns=["node_loc", "time", "mrg"]) + + # For debugging TODO comment before merging + print(ACT) + + +def test_data_conversion(scenario, spec): + """:func:`~.disutility.data_conversion` runs.""" + info = ScenarioInfo(scenario) + disutility.data_conversion(info, spec) + + +def test_data_source(scenario, spec): + """:func:`~.disutility.data_source` runs.""" + info = ScenarioInfo(scenario) + disutility.data_source(info, spec) + + +def test_get_data(scenario, spec): + """:func:`~.disutility.get_data` runs.""" + disutility.get_data(scenario, spec) + + +def test_get_spec(groups, techs, template): + """:func:`~.disutility.get_spec` runs and produces expected output.""" + spec = disutility.get_spec(groups, techs, template) + + # Spec requires the existence of the base technologies + assert {"technology"} == set(spec["require"].set.keys()) + assert techs == spec["require"].set["technology"] + + # Spec removes nothing + assert set() == set(spec["remove"].set.keys()) + + # Spec adds the "disutility" commodity + assert {"disutility"} == set(map(str, spec["add"].set["commodity"])) + + # Spec adds the "distuility source" technology, and "{tech} usage" for each tech, + # per the template + assert {"disutility source", "t0 usage", "t1 usage"} == set( + map(str, spec["add"].set["technology"]) + ) + # Spec adds two modes + assert {"g0", "g1"} == set(map(str, spec["add"].set["mode"])) From 58c3abf0a4846365c7d6069a81d59497002c35ba Mon Sep 17 00:00:00 2001 From: Paul 
Natsuo Kishimoto Date: Thu, 1 Apr 2021 14:02:57 +0200 Subject: [PATCH 09/29] Sort imports; satisfy mypy --- message_ix_models/model/disutility.py | 28 +++++++++++++-------------- message_ix_models/util/__init__.py | 2 +- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 9cf2ea741f..4bf9156450 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -1,7 +1,7 @@ +import logging from collections import defaultdict from functools import lru_cache, partial -from typing import Dict, Mapping, Sequence, Union -import logging +from typing import Dict, List, Mapping, Sequence, Union import message_ix import pandas as pd @@ -19,7 +19,6 @@ same_node, ) - log = logging.getLogger(__name__) CodeLike = Union[str, Code] @@ -27,8 +26,8 @@ def add( scenario: message_ix.Scenario, - groups: Sequence[CodeLike], - technologies: Sequence[CodeLike], + groups: Sequence[Code], + technologies: Sequence[Code], template: Code, **options, ) -> None: @@ -41,8 +40,8 @@ def add( def get_spec( - groups: Sequence[CodeLike], - technologies: Sequence[CodeLike], + groups: Sequence[Code], + technologies: Sequence[Code], template: Code, ) -> Dict[str, ScenarioInfo]: """Get a spec for a disutility formulation. @@ -60,15 +59,14 @@ def get_spec( remove = ScenarioInfo() add = ScenarioInfo() - require.set["technology"] = technologies + require.set["technology"].extend(technologies) # Disutility commodity and source add.set["commodity"] = [Code(id="disutility")] add.set["technology"] = [Code(id="disutility source")] # Add consumer groups - for g in groups: - add.set["mode"].append(Code(id=g.id, name=f"Production for {g.id}")) + add.set["mode"].extend(Code(id=g.id, name=f"Production for {g.id}") for g in groups) # Add conversion technologies for t in technologies: @@ -145,7 +143,7 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: mode = list(map(str, spec["add"].set["mode"])) # Data to return - data = defaultdict(list) + data0: Mapping[str, List[pd.DataFrame]] = defaultdict(list) # Loop over technologies for t in technology: @@ -183,22 +181,22 @@ def output_commodity(df): df = df.pipe(broadcast, node_loc=info.N[1:]).pipe(same_node) if par == "input": # Common across modes - data[par].append(df.assign(mode="all")) + data0[par].append(df.assign(mode="all")) # Disutility inputs differ by mode - data[par].append( + data0[par].append( df.assign(commodity="disutility").pipe(broadcast, mode=mode) ) elif par == "output": # - Broadcast across modes # - Use a function to set the output commodity based on the # mode - data[par].append( + data0[par].append( df.pipe(broadcast, mode=mode).assign(commodity=output_commodity) ) # Concatenate to a single data frame per parameter - data = {par: pd.concat(dfs) for par, dfs in data.items()} + data = {par: pd.concat(dfs) for par, dfs in data0.items()} # Create data for capacity_factor and technical_lifetime data.update( diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index 5cdc08f964..0a8d711d8d 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -411,7 +411,7 @@ def make_matched_dfs(base, **par_value): } -def make_source_tech(info, common, **values) -> Mapping[str, pd.DataFrame]: +def make_source_tech(info, common, **values) -> Dict[str, pd.DataFrame]: """Return parameter data for a ‘source’ technology. 
The technology has no inputs; its output commodity and/or level are determined by From 54720092eb2e2888a12a06f419c545c060d7493c Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 14:27:23 +0200 Subject: [PATCH 10/29] Install GAMS license on GHA runner --- .github/workflows/pytest.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index c27c642790..c06b81fc8f 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -98,6 +98,14 @@ jobs: run: ixmp/ci/install-gams.sh shell: bash + - name: Install GAMS license + env: + GAMS_LICENSE: ${ secrets.GAMS_LICENSE } + run: | + echo "$GAMS_LICENSE" > $(dirname $(which gams))/gamslice.txt + gams + shell: bash + - name: Upgrade pip, wheel, setuptools-scm run: python -m pip install --upgrade pip wheel setuptools-scm From ed0c48c7f0b5b26462f45d848fb5f7ea4d5ade82 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 14:28:24 +0200 Subject: [PATCH 11/29] Reflow docstrings in .util --- message_ix_models/util/__init__.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index 0a8d711d8d..a8780ec079 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -160,8 +160,8 @@ def copy_column(column_name): Examples -------- - Modify `df` by filling the column 'baz' with the value ``3``, and copying - the column 'bar' into column 'foo'. + Modify `df` by filling the column 'baz' with the value ``3``, and copying the column + 'bar' into column 'foo'. >>> df.assign(foo=copy_column('bar'), baz=3) """ @@ -354,9 +354,8 @@ def make_io(src, dest, efficiency, on="input", **kwargs): efficiency : float Conversion efficiency. on : 'input' or 'output' - If 'input', `efficiency` applies to the input, and the output, thus the - activity level of the technology, is in dest[2] units. If 'output', - the opposite. + If 'input', `efficiency` applies to the input, and the output, thus the activity + level of the technology, is in dest[2] units. If 'output', the opposite. kwargs Passed to :func:`make_df`. @@ -389,11 +388,11 @@ def make_matched_dfs(base, **par_value): """Return data frames derived from *base* for multiple parameters. *par_values* maps from parameter names (e.g. 'fix_cost') to values. - make_matched_dfs returns a :class:`dict` of :class:`pandas.DataFrame`, one - for each parameter in *par_value*. The contents of *base* are used to - populate the columns of each data frame, and the values of *par_value* - overwrite the 'value' column. Duplicates—which occur when the target - parameter has fewer dimensions than *base*—are dropped. + make_matched_dfs returns a :class:`dict` of :class:`pandas.DataFrame`, one for each + parameter in *par_value*. The contents of *base* are used to populate the columns + of each data frame, and the values of *par_value* overwrite the 'value' column. + Duplicates—which occur when the target parameter has fewer dimensions than + *base*—are dropped. 
Examples -------- From d724c72b2e16141dd028d4eca0aded1c2a7aa476 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 14:47:07 +0200 Subject: [PATCH 12/29] Use iiasa/actions to set up GAMS --- .github/workflows/pytest.yaml | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index c06b81fc8f..4f75d11af0 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -7,7 +7,6 @@ on: branches: [ main ] env: - GAMS_VERSION: 25.1.1 # For setuptools-scm. With fetch --tags below, this ensures that enough # history is fetched to contain the latest tag, so that setuptools-scm can # generate the version number. Update: @@ -91,20 +90,10 @@ jobs: ${{ matrix.os }}-gams${{ env.GAMS_VERSION }}- ${{ matrix.os }}- - - name: Install GAMS - # Use the scripts from the checked-out ixmp repo - env: - CI_OS: ${{ matrix.os }} - run: ixmp/ci/install-gams.sh - shell: bash - - - name: Install GAMS license - env: - GAMS_LICENSE: ${ secrets.GAMS_LICENSE } - run: | - echo "$GAMS_LICENSE" > $(dirname $(which gams))/gamslice.txt - gams - shell: bash + - uses: iiasa/actions/setup-gams@main + with: + version: 25.1.1 + license: ${{ secrets.GAMS_LICENSE }} - name: Upgrade pip, wheel, setuptools-scm run: python -m pip install --upgrade pip wheel setuptools-scm From 12cea8e4f21015f070923ba9c95ed5f5fdc49784 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Thu, 1 Apr 2021 17:57:11 +0200 Subject: [PATCH 13/29] Update test_disutility.test_get_spec() --- message_ix_models/tests/model/test_disutility.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index 6f404744bd..b366f5049b 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -184,8 +184,15 @@ def test_get_spec(groups, techs, template): # Spec removes nothing assert set() == set(spec["remove"].set.keys()) - # Spec adds the "disutility" commodity - assert {"disutility"} == set(map(str, spec["add"].set["commodity"])) + # Spec adds the "disutility" commodity; and adds (if not existing) the output + # commodities for t[01] and demand commodities for g[01] + assert { + "disutility", + "output of t0", + "output of t1", + "demand of group g0", + "demand of group g1", + } == set(map(str, spec["add"].set["commodity"])) # Spec adds the "distuility source" technology, and "{tech} usage" for each tech, # per the template From b38c88d62f1310dbc09a4db4a4c8c9b5c30ed3b2 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 15:06:04 +0200 Subject: [PATCH 14/29] Ensure unique indices from .util.broadcast and .util.make_matched_dfs --- message_ix_models/util/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index a8780ec079..b294444e0d 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -147,10 +147,15 @@ def broadcast(df, **kwargs): ) continue + # - Duplicate the data + # - Drop the existing column named 'dim' + # - Re-add the column from the constructed MultiIndex + # - Reindex for sequential row numbers df = ( - pd.concat({level: df for level in levels}, names=[dim]) + pd.concat([df] * len(levels), keys=levels, names=[dim]) .drop(dim, axis=1) .reset_index(dim) + .reset_index() ) return 
df @@ -405,7 +410,9 @@ def make_matched_dfs(base, **par_value): """ data = {col: v for col, v in base.iteritems() if col != "value"} return { - par: message_ix.make_df(par, **data, value=value).drop_duplicates() + par: message_ix.make_df(par, **data, value=value) + .drop_duplicates() + .reset_index() for par, value in par_value.items() } From 09892904a494adcfa2e37c46aabc89711835039d Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 15:06:27 +0200 Subject: [PATCH 15/29] Store duration_period when ScenarioInfo() is initialized from a Scenario --- message_ix_models/util/scenarioinfo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/message_ix_models/util/scenarioinfo.py b/message_ix_models/util/scenarioinfo.py index b347d8e865..b39cb27b29 100644 --- a/message_ix_models/util/scenarioinfo.py +++ b/message_ix_models/util/scenarioinfo.py @@ -64,6 +64,9 @@ def __init__(self, scenario=None): except AttributeError: continue # pd.DataFrame for ≥2-D set; don't convert + for name in ("duration_period",): + self.par[name] = scenario.par(name) + self.is_message_macro = "PRICE_COMMODITY" in scenario.par_list() # Computed once From 3672ff9501ed29f04a96b6400d8dd14ab6f3a5d2 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 16:43:52 +0200 Subject: [PATCH 16/29] Set technical_lifetime for 1-period technologies using duration_period --- message_ix_models/model/disutility.py | 55 ++++++++++++++++----------- 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 4bf9156450..c79684149f 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -127,6 +127,23 @@ def get_data(scenario, spec, **kwargs) -> Mapping[str, pd.DataFrame]: return data +def dp_for(col_name: str, info: ScenarioInfo) -> pd.Series: + """:meth:`pandas.DataFrame.assign` helper for ``duration_period``. + + Returns a callable to be passed to :meth:`pandas.DataFrame.assign`. The callable + takes a data frame as the first argument, and returns a :class:`pandas.Series` + based on the ``duration_period`` parameter in `info`, aligned to `col_name` in the + data frame. 
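A plain-:mod:`pandas` sketch of the merge-and-assign pattern that :func:`dp_for` wraps; the ``duration_period`` values here are made up, whereas the real code takes them from :attr:`ScenarioInfo.par`:

.. code-block:: python

    import pandas as pd

    # Hypothetical duration_period data, as stored on ScenarioInfo.par
    duration_period = pd.DataFrame(dict(year=[2020, 2025, 2030], value=[5, 5, 5]))

    # A technical_lifetime frame whose 'value' should equal the length of the
    # vintage period
    tl = pd.DataFrame(dict(year_vtg=[2020, 2025], year_act=[2020, 2025], value=None))

    filled = tl.assign(
        # Merge on the vintage year; "value_y" is the period length from
        # duration_period after the suffix is applied to the clashing columns
        value=lambda df: df.merge(
            duration_period, left_on="year_vtg", right_on="year"
        )["value_y"],
        unit="y",
    )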
+ """ + + def func(df): + return df.merge(info.par["duration_period"], left_on=col_name, right_on="year")[ + "value_y" + ] + + return func + + def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: """Input and output data for disutility conversion technologies.""" common = dict( @@ -181,7 +198,7 @@ def output_commodity(df): df = df.pipe(broadcast, node_loc=info.N[1:]).pipe(same_node) if par == "input": # Common across modes - data0[par].append(df.assign(mode="all")) + data0[par].append(df.pipe(broadcast, mode=mode)) # Disutility inputs differ by mode data0[par].append( @@ -189,40 +206,28 @@ def output_commodity(df): ) elif par == "output": # - Broadcast across modes - # - Use a function to set the output commodity based on the - # mode + # - Use a function to set the output commodity based on the mode data0[par].append( df.pipe(broadcast, mode=mode).assign(commodity=output_commodity) ) # Concatenate to a single data frame per parameter - data = {par: pd.concat(dfs) for par, dfs in data0.items()} + data = {par: pd.concat(dfs, ignore_index=True) for par, dfs in data0.items()} # Create data for capacity_factor and technical_lifetime data.update( make_matched_dfs( base=data["input"], capacity_factor=1, - # TODO get this from ScenarioInfo - technical_lifetime=10, - # commented: activity constraints for the technologies - # TODO get these values from an argument - growth_activity_lo=-0.5, - # growth_activity_up=0.5, - # initial_activity_up=1., - # soft_activity_lo=-0.5, - # soft_activity_up=0.5, + technical_lifetime=None, ) ) - # Remove growth_activity_lo for first year - data["growth_activity_lo"] = data["growth_activity_lo"].query( - f"year_act > {spec['add'].y0}" - ) - # commented: initial activity constraints for the technologies - # data.update( - # make_matched_dfs(base=data["output"], initial_activity_up=2.) 
- # ) + # Update technical_lifetime with values from duration_period for the corresponding + # period + data["technical_lifetime"] = data["technical_lifetime"].assign( + value=dp_for("year_vtg", info), unit="y" + ) return data @@ -253,9 +258,13 @@ def data_source(info, spec) -> Mapping[str, pd.DataFrame]: ), output=1.0, var_cost=1.0, - # TODO get this from ScenarioInfo - technical_lifetime=10, + technical_lifetime=None, ) result["output"] = result["output"].pipe(broadcast, level=sorted(levels)) + # Update technical_lifetime with values from duration_period for the corresponding + # period + result["technical_lifetime"] = result["technical_lifetime"].assign( + value=dp_for("year_vtg", info), unit="y" + ) return result From ad7c74519ae9c4098f5b500a2902d3a4bbd582c3 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 16:44:59 +0200 Subject: [PATCH 17/29] Expand test_disutility.test_minimal --- .../tests/model/test_disutility.py | 63 +++++++++++-------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index b366f5049b..e6911e06bc 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -1,3 +1,5 @@ +from itertools import product + import pandas as pd import pytest from message_ix import make_df @@ -108,51 +110,60 @@ def test_minimal(scenario, groups, techs, template): common, output=1.0, technical_lifetime=5.0, - var_cost=0.0, + var_cost=1.0, ), ) # For each combination of (tech) × (group) × (2 years) df = pd.DataFrame( [ - ["g0", "output of t0", "t0 usage", 2020, 1.0], - ["g0", "output of t0", "t0 usage", 2025, 1.0], - ["g0", "output of t1", "t1 usage", 2020, 1.0], - ["g0", "output of t1", "t1 usage", 2025, 1.0], - ["g1", "output of t0", "t0 usage", 2020, 1.0], - ["g1", "output of t0", "t0 usage", 2025, 1.0], - ["g1", "output of t1", "t1 usage", 2020, 1.0], - ["g1", "output of t1", "t1 usage", 2025, 1.0], + ["g0", "t0 usage", 2020, 0.1], + ["g0", "t0 usage", 2025, 0.1], + ["g0", "t1 usage", 2020, 0.1], + ["g0", "t1 usage", 2025, 0.1], + ["g1", "t0 usage", 2020, 0.1], + ["g1", "t0 usage", 2025, 0.1], + ["g1", "t1 usage", 2020, 0.1], + ["g1", "t1 usage", 2025, 0.1], ], - columns=["mode", "commodity", "technology", "year_vtg", "value"], + columns=["mode", "technology", "year_vtg", "value"], ) - data["input"] = make_df("input", **df, **COMMON).assign( + data["input"] = make_df("input", **df, commodity="disutility", **COMMON).assign( node_origin=copy_column("node_loc"), year_act=copy_column("year_vtg") ) - data["demand"] = make_df( - "demand", - **pd.DataFrame( - [ - ["demand of group g0", 2020, 1.0], - ["demand of group g0", 2025, 1.0], - ["demand of group g1", 2020, 1.0], - ["demand of group g1", 2025, 1.0], - ], - columns=["commodity", "year", "value"], - ), - **COMMON, - ) + # Demand + c, y = zip(*product(["demand of group g0", "demand of group g1"], [2020, 2025])) + data["demand"] = make_df("demand", commodity=c, year=y, value=1.0, **COMMON) + + # Activity in the first year + m, t = zip(*product(["g0", "g1"], ["t0 usage", "t1 usage"])) + for bound in ("lo", "up"): + par = f"bound_activity_{bound}" + data[par] = make_df( + par, value=0.5, mode=m, technology=t, year_act=2020, **COMMON + ) + + # Bounds + t, ya = zip(*product(["t0 usage", "t1 usage"], [2025])) + for bound, factor in (("lo", -1.0), ("up", 1.0)): + par = f"growth_activity_{bound}" + data[par] = make_df( + par, value=factor * 0.01, 
technology=t, year_act=ya, **COMMON + ) scenario.check_out() add_par_data(scenario, data) scenario.commit("Disutility test 1") + # Pre-solve debugging output + for par in ("input", "output", "duration_period", "var_cost"): + scenario.par(par).to_csv(f"debug-{par}.csv") + scenario.solve(quiet=True) + # Post-solve debugging output TODO comment before merging ACT = scenario.var("ACT").query("lvl > 0").drop(columns=["node_loc", "time", "mrg"]) - - # For debugging TODO comment before merging print(ACT) From 9e93ce0a3651e22ce197da761645ec272e9bd0ce Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 17:54:56 +0200 Subject: [PATCH 18/29] Collapse consumer groups into "technology" dimension, not "mode" --- message_ix_models/model/disutility.py | 57 +++++++++------------------ 1 file changed, 19 insertions(+), 38 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index c79684149f..7282706136 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -1,6 +1,7 @@ import logging from collections import defaultdict from functools import lru_cache, partial +from itertools import product from typing import Dict, List, Mapping, Sequence, Union import message_ix @@ -65,33 +66,29 @@ def get_spec( add.set["commodity"] = [Code(id="disutility")] add.set["technology"] = [Code(id="disutility source")] - # Add consumer groups - add.set["mode"].extend(Code(id=g.id, name=f"Production for {g.id}") for g in groups) - # Add conversion technologies - for t in technologies: + for t, g in product(technologies, groups): # String formatting arguments - fmt = dict(technology=t) + fmt = dict(technology=t, group=g) # Format each field in the "input" and "output" annotations input = {k: v.format(**fmt) for k, v in eval_anno(template, id="input").items()} - output = eval_anno(template, id="output") + output = { + k: v.format(**fmt) for k, v in eval_anno(template, id="output").items() + } # - Format the ID string from the template # - Copy the "output" annotation without modification t_code = Code( id=template.id.format(**fmt), annotations=[ - template.get_annotation(id="output"), Annotation(id="input", text=repr(input)), + Annotation(id="output", text=repr(output)), ], ) # "commodity" set elements to add - add.set["commodity"].append(input["commodity"]) - add.set["commodity"].extend( - output["commodity"].format(mode=g.id) for g in groups - ) + add.set["commodity"].extend([input["commodity"], output["commodity"]]) # "technology" set elements to add t_code.annotations.append(Annotation(id="input", text=repr(input))) @@ -147,6 +144,7 @@ def func(df): def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: """Input and output data for disutility conversion technologies.""" common = dict( + mode="all", year_vtg=info.Y, year_act=info.Y, # No subannual detail @@ -157,37 +155,26 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: # Use the spec to retrieve information technology = spec["add"].set["technology"] - mode = list(map(str, spec["add"].set["mode"])) # Data to return data0: Mapping[str, List[pd.DataFrame]] = defaultdict(list) - # Loop over technologies + # Loop over conversion technologies for t in technology: # Use the annotations on the technology Code to get information about the # commodity, level, and unit input = eval_anno(t, "input") output = eval_anno(t, "output") - if input is output is None: + if None in (input, output): if t.id == "disutility source": - continue # Data for this 
tech is from disutility_source() + continue # Data for this tech is from data_source() else: raise ValueError(t) # Error in user input - # Helper functions for output - @lru_cache() - def oc_for_mode(mode): - # Format the output commodity id given the mode id - return output["commodity"].format(mode=mode) - - def output_commodity(df): - # Return a series with output commodity based on mode - return df["mode"].apply(oc_for_mode) - # Make input and output data frames i_o = make_io( (input["commodity"], input["level"], input["unit"]), - (None, output["level"], output["unit"]), + (output["commodity"], output["level"], output["unit"]), 1.0, on="output", technology=t.id, @@ -196,21 +183,15 @@ def output_commodity(df): for par, df in i_o.items(): # Broadcast across nodes df = df.pipe(broadcast, node_loc=info.N[1:]).pipe(same_node) - if par == "input": - # Common across modes - data0[par].append(df.pipe(broadcast, mode=mode)) - # Disutility inputs differ by mode - data0[par].append( - df.assign(commodity="disutility").pipe(broadcast, mode=mode) - ) - elif par == "output": - # - Broadcast across modes - # - Use a function to set the output commodity based on the mode - data0[par].append( - df.pipe(broadcast, mode=mode).assign(commodity=output_commodity) + if par == "input": + # Add input of disutility + df = pd.concat( + [df, df.assign(commodity="disutility")], ignore_index=True ) + data0[par].append(df) + # Concatenate to a single data frame per parameter data = {par: pd.concat(dfs, ignore_index=True) for par, dfs in data0.items()} From 876b69c3bb2fb1587c772c76856b1c24f4ac1bd1 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 18:13:21 +0200 Subject: [PATCH 19/29] Adjust test_disutility.test_minimal --- .../tests/model/test_disutility.py | 63 ++++++++++--------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index e6911e06bc..ac94fc7882 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -1,6 +1,7 @@ from itertools import product import pandas as pd +import pandas.testing as pdt import pytest from message_ix import make_df from sdmx.model import Annotation, Code @@ -18,6 +19,7 @@ COMMON = dict( level="useful", + mode="all", node_dest="R14_AFR", node_loc="R14_AFR", node_origin="R14_AFR", @@ -50,11 +52,11 @@ def template(): # Template for outputs of conversion technologies, to a group–specific demand # commodity - output = dict(commodity="demand of group {mode}", level="useful", unit="kg") + output = dict(commodity="demand of group {group}", level="useful", unit="kg") # Code's ID is itself a template for IDs of conversion technologies yield Code( - id="{technology} usage", + id="usage of {technology} by {group}", annotations=[ Annotation(id="input", text=repr(input)), Annotation(id="output", text=repr(output)), @@ -115,41 +117,38 @@ def test_minimal(scenario, groups, techs, template): ) # For each combination of (tech) × (group) × (2 years) - df = pd.DataFrame( + input_data = pd.DataFrame( [ - ["g0", "t0 usage", 2020, 0.1], - ["g0", "t0 usage", 2025, 0.1], - ["g0", "t1 usage", 2020, 0.1], - ["g0", "t1 usage", 2025, 0.1], - ["g1", "t0 usage", 2020, 0.1], - ["g1", "t0 usage", 2025, 0.1], - ["g1", "t1 usage", 2020, 0.1], - ["g1", "t1 usage", 2025, 0.1], + ["usage of t0 by g0", 2020, 0.1], + ["usage of t0 by g0", 2025, 0.1], + ["usage of t1 by g0", 2020, 0.1], + ["usage of t1 by g0", 2025, 0.1], + 
["usage of t0 by g1", 2020, 0.1], + ["usage of t0 by g1", 2025, 0.1], + ["usage of t1 by g1", 2020, 0.1], + ["usage of t1 by g1", 2025, 0.1], ], - columns=["mode", "technology", "year_vtg", "value"], - ) - data["input"] = make_df("input", **df, commodity="disutility", **COMMON).assign( - node_origin=copy_column("node_loc"), year_act=copy_column("year_vtg") + columns=["technology", "year_vtg", "value"], ) + data["input"] = make_df( + "input", **input_data, commodity="disutility", **COMMON + ).assign(node_origin=copy_column("node_loc"), year_act=copy_column("year_vtg")) # Demand c, y = zip(*product(["demand of group g0", "demand of group g1"], [2020, 2025])) data["demand"] = make_df("demand", commodity=c, year=y, value=1.0, **COMMON) # Activity in the first year - m, t = zip(*product(["g0", "g1"], ["t0 usage", "t1 usage"])) + t = sorted(input_data["technology"].unique()) for bound in ("lo", "up"): par = f"bound_activity_{bound}" - data[par] = make_df( - par, value=0.5, mode=m, technology=t, year_act=2020, **COMMON - ) + data[par] = make_df(par, value=0.5, technology=t, year_act=2020, **COMMON) # Bounds - t, ya = zip(*product(["t0 usage", "t1 usage"], [2025])) for bound, factor in (("lo", -1.0), ("up", 1.0)): par = f"growth_activity_{bound}" data[par] = make_df( - par, value=factor * 0.01, technology=t, year_act=ya, **COMMON + par, value=factor * 0.1, technology=t, year_act=2025, **COMMON ) scenario.check_out() @@ -157,15 +156,19 @@ def test_minimal(scenario, groups, techs, template): scenario.commit("Disutility test 1") # Pre-solve debugging output - for par in ("input", "output", "duration_period", "var_cost"): + for par in ("input", "output", "technical_lifetime", "var_cost"): scenario.par(par).to_csv(f"debug-{par}.csv") scenario.solve(quiet=True) # Post-solve debugging output TODO comment before merging ACT = scenario.var("ACT").query("lvl > 0").drop(columns=["node_loc", "time", "mrg"]) + print(ACT) + # commented: pending debugging + # pdt.assert_series_equal(ACT["year_act"], ACT["year_vtg"]) + def test_data_conversion(scenario, spec): """:func:`~.disutility.data_conversion` runs.""" @@ -205,10 +208,12 @@ def test_get_spec(groups, techs, template): "demand of group g1", } == set(map(str, spec["add"].set["commodity"])) - # Spec adds the "distuility source" technology, and "{tech} usage" for each tech, - # per the template - assert {"disutility source", "t0 usage", "t1 usage"} == set( - map(str, spec["add"].set["technology"]) - ) - # Spec adds two modes - assert {"g0", "g1"} == set(map(str, spec["add"].set["mode"])) + # Spec adds the "distuility source" technology, and "usage of {tech} by {group}" + # for each tech × group, per the template + assert { + "disutility source", + "usage of t0 by g0", + "usage of t0 by g1", + "usage of t1 by g0", + "usage of t1 by g1", + } == set(map(str, spec["add"].set["technology"])) From a6e7506ffef7efc5275c6e2816acd13ca99f9e46 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Tue, 6 Apr 2021 18:15:21 +0200 Subject: [PATCH 20/29] Remove unused imports --- message_ix_models/model/disutility.py | 2 +- message_ix_models/tests/model/test_disutility.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 7282706136..3d93bedcc4 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -1,6 +1,6 @@ import logging from collections import defaultdict -from functools import lru_cache, partial +from functools import 
partial from itertools import product from typing import Dict, List, Mapping, Sequence, Union diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index ac94fc7882..f5f38fad1b 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -1,7 +1,8 @@ from itertools import product import pandas as pd -import pandas.testing as pdt + +# import pandas.testing as pdt import pytest from message_ix import make_df from sdmx.model import Annotation, Code From 89ed40176946730c56520f78725fecebac4ece07 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 12:40:32 +0200 Subject: [PATCH 21/29] Make technical_lifetime optional for .util.make_source_tech() --- message_ix_models/util/__init__.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index b294444e0d..d92a839bcc 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -430,8 +430,8 @@ def make_source_tech(info, common, **values) -> Dict[str, pd.DataFrame]: common : dict Passed to :func:`make_df`. **values - Values for 'capacity_factor' (optional; default 1.0), 'output', - 'technical_lifetime', and 'var_cost'. + Values for 'capacity_factor' (optional; default 1.0), 'output', 'var_cost', and + optionally 'technical_lifetime'. Returns ------- @@ -440,15 +440,15 @@ def make_source_tech(info, common, **values) -> Dict[str, pd.DataFrame]: """ # Check arguments values.setdefault("capacity_factor", 1.0) - missing = {"capacity_factor", "output", "technical_lifetime", "var_cost"} - set( - values.keys() - ) + missing = {"capacity_factor", "output", "var_cost"} - set(values.keys()) if len(missing): - raise ValueError(f"make_dummy_source() needs values for {repr(missing)}") + raise ValueError(f"make_source_tech() needs values for {repr(missing)}") + elif "technical_lifetime" not in values: + log.debug("No technical_lifetime for source technology") # Create data for "output" - output = ( - message_ix.make_df( + result = dict( + output=message_ix.make_df( "output", value=values.pop("output"), year_act=info.Y, @@ -458,8 +458,9 @@ def make_source_tech(info, common, **values) -> Dict[str, pd.DataFrame]: .pipe(broadcast, node_loc=info.N[1:]) .pipe(same_node) ) - result = make_matched_dfs(base=output, **values) - result["output"] = output + + # Add data for other parameters + result.update(make_matched_dfs(base=result["output"], **values)) return result From 7d333ec8c654eba28a8fe5c2a192632be6f2f22b Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 12:40:56 +0200 Subject: [PATCH 22/29] Remove technical_lifetime for disutility source and usage technologies --- message_ix_models/model/disutility.py | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 3d93bedcc4..0171b7e3e8 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -195,20 +195,8 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: # Concatenate to a single data frame per parameter data = {par: pd.concat(dfs, ignore_index=True) for par, dfs in data0.items()} - # Create data for capacity_factor and technical_lifetime - data.update( - make_matched_dfs( - base=data["input"], - capacity_factor=1, - technical_lifetime=None, - ) - 
) - - # Update technical_lifetime with values from duration_period for the corresponding - # period - data["technical_lifetime"] = data["technical_lifetime"].assign( - value=dp_for("year_vtg", info), unit="y" - ) + # Create data for capacity_factor + data.update(make_matched_dfs(base=data["input"], capacity_factor=1.0)) return data @@ -227,6 +215,7 @@ def data_source(info, spec) -> Mapping[str, pd.DataFrame]: log.info(f"Generate disutility on level(s): {repr(levels)}") + # Use default capacity_factor = 1.0 result = make_source_tech( info, common=dict( @@ -239,13 +228,7 @@ def data_source(info, spec) -> Mapping[str, pd.DataFrame]: ), output=1.0, var_cost=1.0, - technical_lifetime=None, ) result["output"] = result["output"].pipe(broadcast, level=sorted(levels)) - # Update technical_lifetime with values from duration_period for the corresponding - # period - result["technical_lifetime"] = result["technical_lifetime"].assign( - value=dp_for("year_vtg", info), unit="y" - ) return result From 07b53874a34b6145c02e5a1ed93c8f69704e7186 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 13:33:02 +0200 Subject: [PATCH 23/29] Drop extra columns in .util.{broadcast,make_matched_dfs} --- message_ix_models/util/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index d92a839bcc..381bda8133 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -155,7 +155,7 @@ def broadcast(df, **kwargs): pd.concat([df] * len(levels), keys=levels, names=[dim]) .drop(dim, axis=1) .reset_index(dim) - .reset_index() + .reset_index(drop=True) ) return df @@ -412,7 +412,7 @@ def make_matched_dfs(base, **par_value): return { par: message_ix.make_df(par, **data, value=value) .drop_duplicates() - .reset_index() + .reset_index(drop=True) for par, value in par_value.items() } From bd1b074cf33186bc86848cf17e098da16dbd3014 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 13:33:23 +0200 Subject: [PATCH 24/29] Ensure disutility is unitless --- message_ix_models/model/disutility.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 0171b7e3e8..f41501af44 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -66,6 +66,9 @@ def get_spec( add.set["commodity"] = [Code(id="disutility")] add.set["technology"] = [Code(id="disutility source")] + # Disutility is unitless + add.set["unit"].append("") + # Add conversion technologies for t, g in product(technologies, groups): # String formatting arguments @@ -187,7 +190,7 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: if par == "input": # Add input of disutility df = pd.concat( - [df, df.assign(commodity="disutility")], ignore_index=True + [df, df.assign(commodity="disutility", unit="")], ignore_index=True ) data0[par].append(df) From c6d0a157453b144756335c9375091d873a6126c9 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 13:35:56 +0200 Subject: [PATCH 25/29] Clean up test_disutility.test_minimal --- .../tests/model/test_disutility.py | 122 ++++++++++++------ 1 file changed, 85 insertions(+), 37 deletions(-) diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index f5f38fad1b..00a8e45b16 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ 
b/message_ix_models/tests/model/test_disutility.py @@ -2,7 +2,7 @@ import pandas as pd -# import pandas.testing as pdt +import pandas.testing as pdt import pytest from message_ix import make_df from sdmx.model import Annotation, Code @@ -92,42 +92,34 @@ def test_add(scenario, groups, techs, template): assert (scenario.var("ACT")["lvl"] == 0).all() -def test_minimal(scenario, groups, techs, template): - """Minimal test case for disutility formulation.""" - disutility.add(scenario, groups, techs, template) - +def minimal_test_data(scenario): # Fill in the data for the test case - common = COMMON.copy() common.pop("node_loc") common.update(dict(mode="all")) data = dict() + info = ScenarioInfo(scenario) + y0 = info.Y[0] + y1 = info.Y[1] + + # Output from t0 and t1 for t in ("t0", "t1"): common.update(dict(technology=t, commodity=f"output of {t}")) - merge_data( - data, - make_source_tech( - ScenarioInfo(scenario), - common, - output=1.0, - technical_lifetime=5.0, - var_cost=1.0, - ), - ) + merge_data(data, make_source_tech(info, common, output=1.0, var_cost=1.0)) - # For each combination of (tech) × (group) × (2 years) + # Disutility input for each combination of (tech) × (group) × (2 years) input_data = pd.DataFrame( [ - ["usage of t0 by g0", 2020, 0.1], - ["usage of t0 by g0", 2025, 0.1], - ["usage of t1 by g0", 2020, 0.1], - ["usage of t1 by g0", 2025, 0.1], - ["usage of t0 by g1", 2020, 0.1], - ["usage of t0 by g1", 2025, 0.1], - ["usage of t1 by g1", 2020, 0.1], - ["usage of t1 by g1", 2025, 0.1], + ["usage of t0 by g0", y0, 0.1], + ["usage of t0 by g0", y1, 0.1], + ["usage of t1 by g0", y0, 0.1], + ["usage of t1 by g0", y1, 0.1], + ["usage of t0 by g1", y0, 0.1], + ["usage of t0 by g1", y1, 0.1], + ["usage of t1 by g1", y0, 0.1], + ["usage of t1 by g1", y1, 0.1], ], columns=["technology", "year_vtg", "value"], ) @@ -136,39 +128,95 @@ def test_minimal(scenario, groups, techs, template): ).assign(node_origin=copy_column("node_loc"), year_act=copy_column("year_vtg")) # Demand - c, y = zip(*product(["demand of group g0", "demand of group g1"], [2020, 2025])) + c, y = zip(*product(["demand of group g0", "demand of group g1"], [y0, y1])) data["demand"] = make_df("demand", commodity=c, year=y, value=1.0, **COMMON) - # Activity in the first year + # Constraint on activity in the first period t = sorted(input_data["technology"].unique()) for bound in ("lo", "up"): par = f"bound_activity_{bound}" - data[par] = make_df(par, value=0.5, technology=t, year_act=2020, **COMMON) + data[par] = make_df(par, value=0.5, technology=t, year_act=y0, **COMMON) - # Bounds + # Constraint on activity growth + annual = (1.1 ** (1.0 / 5.0)) - 1.0 for bound, factor in (("lo", -1.0), ("up", 1.0)): par = f"growth_activity_{bound}" data[par] = make_df( - par, value=factor * 0.1, technology=t, year_act=2025, **COMMON + par, value=factor * annual, technology=t, year_act=y1, **COMMON ) + return data, y0, y1 + + +def test_minimal(scenario, groups, techs, template): + """Minimal test case for :mod:`.disutility`.""" + # Set up structure + disutility.add(scenario, groups, techs, template) + + # Add test-specific data + data, y0, y1 = minimal_test_data(scenario) + scenario.check_out() add_par_data(scenario, data) scenario.commit("Disutility test 1") - # Pre-solve debugging output - for par in ("input", "output", "technical_lifetime", "var_cost"): - scenario.par(par).to_csv(f"debug-{par}.csv") + # commented: pre-solve debugging output + # for par in ("input", "output", "technical_lifetime", "var_cost"): + # 
scenario.par(par).to_csv(f"debug-{par}.csv") scenario.solve(quiet=True) + # Helper function to retrieve ACT data and condense for inspection + def get_act(s): + result = ( + scenario.var("ACT") + .query("lvl > 0") + .drop(columns=["node_loc", "mode", "time", "mrg"]) + .sort_values(["year_vtg", "technology"]) + .reset_index(drop=True) + ) + # No "stray" activity of technologies beyond the vintage periods + pdt.assert_series_equal( + result["year_act"], result["year_vtg"], check_names=False + ) + result = result.drop(columns=["year_vtg"]).set_index(["technology", "year_act"]) + # Return the activity and its inter-period delta + return result, ( + result.xs(y1, level="year_act") - result.xs(y0, level="year_act") + ) + # Post-solve debugging output TODO comment before merging - ACT = scenario.var("ACT").query("lvl > 0").drop(columns=["node_loc", "time", "mrg"]) + ACT1, ACT1_delta = get_act(scenario) + + # Increase the disutility of for t0 for g0 in period y1 + data["input"].loc[1, "value"] = 0.2 + + # Re-solve + scenario.remove_solution() + scenario.check_out() + scenario.add_par("input", data["input"]) + scenario.commit("Disutility test 2") + scenario.solve(quiet=True) + + # Compare activity + ACT2, ACT2_delta = get_act(scenario) + + merged = ACT1.merge(ACT2, left_index=True, right_index=True) + merged["lvl_diff"] = merged["lvl_y"] - merged["lvl_x"] + + merged_delta = ACT1_delta.merge(ACT2_delta, left_index=True, right_index=True) + + # commented: for debugging + # print(merged, merged_delta) - print(ACT) + # Group g0 decreases usage of t0, and increases usage of t1, in period y1 vs. y0 + assert merged_delta.loc["usage of t0 by g0", "lvl_y"] < 0 + assert merged_delta.loc["usage of t1 by g0", "lvl_y"] > 0 - # commented: pending debugging - # pdt.assert_series_equal(ACT["year_act"], ACT["year_vtg"]) + # Group g0 usage of t0 is lower when the disutility is higher + assert merged.loc[("usage of t0 by g0", y1), "lvl_diff"] < 0 + # Group g0 usage of t1 is correspondingly higher + assert merged.loc[("usage of t1 by g0", y1), "lvl_diff"] > 0 def test_data_conversion(scenario, spec): From 752e3e6cc34958cac0780044f1bfca672dc43d98 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 14:06:28 +0200 Subject: [PATCH 26/29] Expand documentation --- doc/api/disutility.rst | 93 +++++++++---------- doc/api/model-build.rst | 4 +- message_ix_models/model/disutility.py | 4 +- .../tests/model/test_disutility.py | 15 +-- 4 files changed, 57 insertions(+), 59 deletions(-) diff --git a/doc/api/disutility.rst b/doc/api/disutility.rst index 1ab8d663f9..75edbde4b0 100644 --- a/doc/api/disutility.rst +++ b/doc/api/disutility.rst @@ -1,12 +1,11 @@ -.. currentmodule:: message_data.model.disutility +.. currentmodule:: message_ix_models.model.disutility Consumer disutility ******************* This module provides a generalized consumer disutility formulation, currently used by :mod:`message_data.model.transport`. - -The formulation rests on the concept of “consumer groups.” -Each consumer group may have a distinct disutility for using the outputs of each technology. +The formulation rests on the concept of “consumer groups”; each consumer group may have a distinct disutility associated with using the outputs of each technology. +A set of ‘pseudo-’/‘virtual’/non-physical “usage technologies” converts the outputs of the actual technologies into the commodities demanded by each group, while also requiring input of a costly “disutility” commodity. 
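+
+A minimal sketch of the call that sets up this structure (the group and technology IDs are illustrative, and ``scenario`` is an existing :class:`message_ix.Scenario`; see the detailed example below):
+
+.. code-block:: python
+
+   from sdmx.model import Annotation, Code
+
+   from message_ix_models.model import disutility
+
+   # Two consumer groups and two technologies they may use
+   groups = [Code(id="g0"), Code(id="g1")]
+   techs = [Code(id="t0"), Code(id="t1")]
+
+   # Templates for the input and output of each "usage" technology
+   input = dict(commodity="output of {technology}", level="useful", unit="kg")
+   output = dict(commodity="demand of group {group}", level="useful", unit="kg")
+
+   template = Code(
+       id="usage of {technology} by {group}",
+       annotations=[
+           Annotation(id="input", text=repr(input)),
+           Annotation(id="output", text=repr(output)),
+       ],
+   )
+
+   disutility.add(scenario, groups=groups, technologies=techs, template=template)
+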
Method & usage @@ -25,48 +24,51 @@ Technologies Template This is also a :class:`.Code` object, similar to those in ``technologies``; see below. +The code creates a source technology for the “disutility” commodity. +The code does *not* perform the following step(s) needed to completely parametrize the formulation: -The code does *not* do the following steps needed to completely parametrize the formulation: - -- Set consumer group-specific 'demand' parameter values for new commodities. -- Create a source technology for the “disutility” commodity. +- Set consumer group-specific ``demand`` parameter values for new commodities. +- Set the amounts of “disutility” commodities used as ``input`` to the new usage technologies. +These must be parametrized based on the particular application. Detailed example ================ -From :func:`.transport.build.main`: +This example is similar to the one used in :func:`.test_disutility.test_minimal`: .. code-block:: python - # Add generalized disutility formulation to LDV technologies - disutility.add( - scenario, + # Two consumer groups + groups = [Code(id="g0"), Code(id="g1")] - # Generate a list of consumer groups - consumer_groups=consumer_groups(), + # Two technologies, for which groups may have different disutilities. + techs = [Code(id="t0"), Code(id="t1")] - # Generate a list of technologies - technologies=generate_set_elements("technology", "LDV"), + # Add generalized disutility formulation to some technologies + disutility.add( + scenario, + groups=groups, + technologies=techs, template=Code( # Template for IDs of conversion technologies - id="transport {technology} usage", + id="usage of {technology} by {group}", # Templates for inputs of conversion technologies input=dict( # Technology-specific output commodity - commodity="transport vehicle {technology}", + commodity="output of {technology}", level="useful", - unit="km", + unit="kg", ), # Templates for outputs of conversion technologies output=dict( # Consumer-group–specific demand commodity - commodity="transport pax {mode}", + commodity="demand of group {group}", level="useful", - unit="km", + unit="kg", ), ), **options, @@ -75,49 +77,44 @@ From :func:`.transport.build.main`: :func:`add` uses :func:`get_spec` to generate a specification that adds the following: -- A single 'commodity' set element, “disutility”. - -- 1 'mode' set element per element in ``consumer_groups``. +- For the set ``commodity``: - **Example:** the function :func:`.consumer_groups` returns codes like “RUEAA”, “URLMF”, etc.; one 'mode' is created for each such group. + - The single element “disutility”. + - One element per `technologies`, using the `template` “input” annotation, e.g. “output of t0” generated from ``output of {technology}`` and the id “t0”. + These **may** already be present in the `scenario`; if not, the spec causes them to be added. + - One elements per `groups`, using the `template` “output” annotation, e.g. “demand of group g1” generated from ``demand of group {group}`` and the id “g1”. + These **may** already be present in the `scenario`; if not, the spec causes them to be added. -- 1 'commodity' set element per technology in ``technologies``. - ``template.anno["input"]["commodity"]`` is used to generate the IDs of these commodities. - - **Example:** “transport vehicle {technology}” is used to generate a commodity “transport vehicles ELC_100” associated with the technology with the ID “ELC_100”. - -- 1 'commodity' set element per consumer group. 
-  ``template.anno["output"]["commodity"]`` is used to generate the IDs of these commodities.
-
-  **Example:** “transport pax {mode}” is used with to generate a commodity “transport pax RUEAA” is associated with the consumer group with ID “RUEAA”.
-
-- 1 additional 'technology' set element per disutility-affected technology.
-  ``template.id`` is used to generate the IDs of these technologies.
-
-  **Example:** “transport {technology} usage}” is used to generate “transport ELC_100 usage” associated with the existing technology “ELC_100”.
+- For the set ``technology``:
+
+  - The single element “disutility source”.
+  - One element for each combination of disutility-affected technology (`technologies`) and consumer group (`groups`).
+    For example, “usage of t0 by g1” generated from ``usage of {technology} by {group}``, and the IDs “t0” and “g1”.
 
 The spec is applied to the target scenario using :func:`.model.build.apply_spec`.
 If the arguments produce a spec that is inconsistent with the target scenario, an exception will be raised at this point.
 
-Next, :func:`add` uses :func:`disutility_conversion` to generate data for the 'input' and 'output' parameters, as follows:
-
-- Existing, disutility-affected technologies (those listed in the ``technologies`` argument) 'output' to technology-specific commodities.
+Next, :func:`add` uses :func:`data_conversion` and :func:`data_source` to generate:
 
-  **Example:** the technology “ELC_100” outputs to the commodity “transport vehicle ELC_100”, instead of to a common/pooled commodity such as “transport vehicle”.
+- ``output`` and ``var_cost`` parameter data for “disutility source”.
+  This technology outputs the unitless commodity “disutility” at a cost of 1.0 per unit.
 
-- New, conversion technologies have one 'mode' per consumer group.
+- ``input`` and ``output`` parameter data for the new usage technologies.
+  For example, the new technology “usage of t0 by g1”…
 
-  **Example:** the new technology “transport ELC_100 usage”
+  - …takes input from the *technology-specific* commodity “output of t0”.
+  - …takes input from the common commodity “disutility”, in an amount specific to group “g1”.
+  - …outputs to a *group-specific* commodity “demand of group g1”.
 
-  - …in “all” modes—takes the *same* quantity of input from the *technology-specific* commodity “transport ELC_100 vehicle”.
-  - …in each consumer-group specific mode e.g. “RUEAA”—takes a *group-specific* quantity of input from the common commodity “disutility”.
-  - …in each consumer-group specific mode e.g. “RUEAA”—outputs to a *group-specific* commodity, e.g. “transport pax RUEAA”.
+Note that the `technologies` towards which the groups have disutility are assumed to already be configured to ``output`` to the corresponding commodities.
+For example, the technology “t0” outputs to the commodity “output of t0”; the ``output`` values for this technology are **not** added/introduced by :func:`add`.
 
 Code reference
 ==============
 
+See also :mod:`message_ix_models.tests.model.test_disutility`.
+
 .. automodule:: message_ix_models.model.disutility
    :members:
diff --git a/doc/api/model-build.rst b/doc/api/model-build.rst
index 35444a631a..cc3d6367cb 100644
--- a/doc/api/model-build.rst
+++ b/doc/api/model-build.rst
@@ -37,7 +37,7 @@ The following modules use this workflow and can be examples for developing simil
 Code reference
 ==============
 
-.. currentmodule:: message_data.model.build
+.. currentmodule:: message_ix_models.model.build
 
-.. automodule:: message_data.model.build
+..
automodule:: message_ix_models.model.build :members: diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index f41501af44..5e14cc7349 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -145,7 +145,7 @@ def func(df): def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: - """Input and output data for disutility conversion technologies.""" + """Generate input and output data for disutility conversion technologies.""" common = dict( mode="all", year_vtg=info.Y, @@ -205,7 +205,7 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: def data_source(info, spec) -> Mapping[str, pd.DataFrame]: - """Generate data for a technology that emits the disutility commodity.""" + """Generate data for a technology that emits the “disutility” commodity.""" # List of input levels where disutility commodity must exist levels = set() for t in spec["add"].set["technology"]: diff --git a/message_ix_models/tests/model/test_disutility.py b/message_ix_models/tests/model/test_disutility.py index 00a8e45b16..099ad9ec24 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -1,3 +1,4 @@ +"""Tests of :mod:`.model.disutility`.""" from itertools import product import pandas as pd @@ -34,19 +35,19 @@ @pytest.fixture def groups(): - """List of two consumer groups.""" + """Fixture: list of 2 consumer groups.""" yield [Code(id="g0"), Code(id="g1")] @pytest.fixture def techs(): - """List of two technologies, for which groups may have different disutilities.""" + """Fixture: list of 2 technologies for which groups can have disutility.""" yield [Code(id="t0"), Code(id="t1")] @pytest.fixture def template(): - """:class:.`Code` object with annotations, for :func:`.disutility.get_spec`.""" + """Fixture: :class:.`Code` with annotations, for :func:`.disutility.get_spec`.""" # Template for inputs of conversion technologies, from a technology-specific # commodity input = dict(commodity="output of {technology}", level="useful", unit="kg") @@ -67,13 +68,13 @@ def template(): @pytest.fixture def spec(groups, techs, template): - """A prepared spec for the minimal test case.""" + """Fixture: a prepared spec for the minimal test case.""" yield disutility.get_spec(groups, techs, template) @pytest.fixture def scenario(request, test_context, techs): - """A :class:`.Scenario` with technologies given by :func:`techs`.""" + """Fixture: a :class:`.Scenario` with technologies given by :func:`techs`.""" s = testing.bare_res(request, test_context, solved=False) s.check_out() @@ -93,7 +94,7 @@ def test_add(scenario, groups, techs, template): def minimal_test_data(scenario): - # Fill in the data for the test case + """Generate data for :func:`test_minimal`.""" common = COMMON.copy() common.pop("node_loc") common.update(dict(mode="all")) @@ -149,7 +150,7 @@ def minimal_test_data(scenario): def test_minimal(scenario, groups, techs, template): - """Minimal test case for :mod:`.disutility`.""" + """Expected results are generated from a minimal test case.""" # Set up structure disutility.add(scenario, groups, techs, template) From 442bd8ceffeb9b74b91241baeb3db61d412b0c53 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 14:08:16 +0200 Subject: [PATCH 27/29] Sort imports in test_disutility.py --- message_ix_models/tests/model/test_disutility.py | 1 - 1 file changed, 1 deletion(-) diff --git a/message_ix_models/tests/model/test_disutility.py 
b/message_ix_models/tests/model/test_disutility.py index 099ad9ec24..41f1cb7cec 100644 --- a/message_ix_models/tests/model/test_disutility.py +++ b/message_ix_models/tests/model/test_disutility.py @@ -2,7 +2,6 @@ from itertools import product import pandas as pd - import pandas.testing as pdt import pytest from message_ix import make_df From f882864e09d5e0ac4f3e2f03dd193ca9b6faa015 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 14:09:40 +0200 Subject: [PATCH 28/29] Add #13 to doc/whatsnew --- doc/whatsnew.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index ce515ceb84..1bedb1e34a 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -1,8 +1,10 @@ What's new ********** -.. Next release -.. ============ +Next release +============ + +- Add :mod:`.model.disutility`, code for setting up structure and data for generalized consumer disutility (:pull:`13`) 2021.3.24 ========= From 55143b4d50307a9677aeef300165b52f20030f48 Mon Sep 17 00:00:00 2001 From: Paul Natsuo Kishimoto Date: Wed, 7 Apr 2021 14:54:14 +0200 Subject: [PATCH 29/29] Add test coverage of several stray lines --- message_ix_models/model/disutility.py | 9 +++--- message_ix_models/testing.py | 3 +- message_ix_models/tests/test_testing.py | 13 ++++++++ message_ix_models/tests/test_util.py | 41 +++++++++++++++++-------- message_ix_models/util/__init__.py | 6 ++-- 5 files changed, 51 insertions(+), 21 deletions(-) diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py index 5e14cc7349..afe3c74d75 100644 --- a/message_ix_models/model/disutility.py +++ b/message_ix_models/model/disutility.py @@ -127,13 +127,15 @@ def get_data(scenario, spec, **kwargs) -> Mapping[str, pd.DataFrame]: return data -def dp_for(col_name: str, info: ScenarioInfo) -> pd.Series: +def dp_for(col_name: str, info: ScenarioInfo) -> pd.Series: # pragma: no cover """:meth:`pandas.DataFrame.assign` helper for ``duration_period``. Returns a callable to be passed to :meth:`pandas.DataFrame.assign`. The callable takes a data frame as the first argument, and returns a :class:`pandas.Series` based on the ``duration_period`` parameter in `info`, aligned to `col_name` in the data frame. + + Currently (2021-04-07) unused. 
""" def func(df): @@ -171,7 +173,7 @@ def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]: if None in (input, output): if t.id == "disutility source": continue # Data for this tech is from data_source() - else: + else: # pragma: no cover raise ValueError(t) # Error in user input # Make input and output data frames @@ -212,9 +214,6 @@ def data_source(info, spec) -> Mapping[str, pd.DataFrame]: input = eval_anno(t, "input") if input: levels.add(input["level"]) - else: - # "disutility source" technology has no annotations - continue log.info(f"Generate disutility on level(s): {repr(levels)}") diff --git a/message_ix_models/testing.py b/message_ix_models/testing.py index 0a36570c0c..558e6c338b 100644 --- a/message_ix_models/testing.py +++ b/message_ix_models/testing.py @@ -127,7 +127,8 @@ def assert_exit_0(self, *args, **kwargs): self.invoke(*args, **kwargs) if self.last_result.exit_code != 0: - raise self.last_result.exc_info[1] + # Re-raise the exception triggered within the CLI invocation + raise self.last_result.exc_info[1].__context__ return self.last_result diff --git a/message_ix_models/tests/test_testing.py b/message_ix_models/tests/test_testing.py index 369de59ad0..a6d5ec05f7 100644 --- a/message_ix_models/tests/test_testing.py +++ b/message_ix_models/tests/test_testing.py @@ -1,6 +1,14 @@ +import click +import pytest + from message_ix_models.testing import bare_res +def test_bare_res_no_request(test_context): + """:func:`bare_res` works with `request` = :obj:`None`.""" + bare_res(None, test_context, solved=False) + + def test_bare_res_solved(request, test_context): """:func:`bare_res` works with `solve` = :obj:`True`. @@ -8,3 +16,8 @@ def test_bare_res_solved(request, test_context): test. """ bare_res(request, test_context, solved=True) + + +def test_cli_runner(mix_models_cli): + with pytest.raises(click.exceptions.UsageError, match="No such command 'foo'"): + mix_models_cli.assert_exit_0(["foo", "bar"]) diff --git a/message_ix_models/tests/test_util.py b/message_ix_models/tests/test_util.py index 19cb875d50..39ad4b4186 100644 --- a/message_ix_models/tests/test_util.py +++ b/message_ix_models/tests/test_util.py @@ -1,5 +1,6 @@ """Tests of :mod:`message_ix_models.util`.""" import logging +import re from pathlib import Path import pandas as pd @@ -35,6 +36,16 @@ def test_as_codes(): assert result[1] not in result[0].child +def test_broadcast(caplog): + # Debug message logged with length-0 values + with caplog.at_level(logging.DEBUG, logger="message_ix_models"): + broadcast(pd.DataFrame(columns=["foo", "bar"]), foo=[], bar=[]) + + assert "Don't broadcast over 'foo'; labels [] have length 0" in caplog.messages + + # TODO expand + + @pytest.mark.parametrize( "data", ( @@ -77,6 +88,10 @@ def test_ffill(): assert 2 * len(df) == len(result) assert years == sorted(result["year_vtg"].unique()) + # Cannot ffill on "value" and "unit" dimensions + with pytest.raises(ValueError, match="value"): + ffill(df, "value", []) + # TODO test some specific values @@ -128,19 +143,17 @@ def test_make_source_tech(): var_cost=3.0, technical_lifetime=4.0, ) - result = make_source_tech( - info, - common=dict( - commodity="commodity", - level="level", - mode="mode", - technology="technology", - time="time", - time_dest="time", - unit="unit", - ), - **values, + common = dict( + commodity="commodity", + level="level", + mode="mode", + technology="technology", + time="time", + time_dest="time", + unit="unit", ) + # Code runs + result = make_source_tech(info, common, **values) # Result is dictionary with the 
expected keys assert isinstance(result, dict) assert set(result.keys()) == set(values.keys()) @@ -154,6 +167,10 @@ def test_make_source_tech(): # No empty values assert not df.isna().any(None) + del values["var_cost"] + with pytest.raises(ValueError, match=re.escape("needs values for {'var_cost'}")): + make_source_tech(info, common, **values) + def test_package_data_path(*parts, suffix=None): assert MESSAGE_MODELS_PATH.joinpath("data", "foo", "bar") == package_data_path( diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index 381bda8133..d2e8bc608c 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -205,7 +205,7 @@ def ffill( Data to fill forwards. dim : str Dimension to fill along. Must be a column in `df`. - labels : list of str + values : list of str Labels along `dim` that must be present in the returned data frame. expr : str, optional If provided, :meth:`.DataFrame.eval` is called. This can be used to assign one @@ -507,8 +507,8 @@ def strip_par_data( # Iterate over parameters with ≥1 dimensions indexed by `set_name` for par_name in iter_parameters(set_name): - if par_name not in par_list: - log.warning( # pragma: no cover + if par_name not in par_list: # pragma: no cover + log.warning( f"MESSAGEix parameter {repr(par_name)} missing in Scenario " f"{scenario.model}/{scenario.scenario}" )
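
A minimal usage sketch of make_source_tech() as changed in [PATCH 21/29]; the node and year labels are illustrative, technical_lifetime may now be omitted, and capacity_factor defaults to 1.0:

    from message_ix_models import ScenarioInfo
    from message_ix_models.util import make_source_tech

    info = ScenarioInfo()
    info.set["node"] = ["World", "node0", "node1"]
    info.set["year"] = [2020, 2025]

    # Dimensions and labels shared by all generated parameter data
    common = dict(
        commodity="disutility",
        level="useful",
        mode="all",
        technology="disutility source",
        time="year",
        time_dest="year",
        unit="",
    )

    # Returns a dict with keys "output", "var_cost", and "capacity_factor";
    # no "technical_lifetime" entry, because none was requested
    data = make_source_tech(info, common, output=1.0, var_cost=1.0)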