Skip to content

Commit

Permalink
Comply with reduced complexity limit
Browse files Browse the repository at this point in the history
  • Loading branch information
glatterf42 committed Apr 3, 2024
1 parent df20a9d commit c3701f5
Show file tree
Hide file tree
Showing 2 changed files with 148 additions and 123 deletions.
211 changes: 116 additions & 95 deletions message_ix_models/model/water/data/infrastructure.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
treatment in urban & rural"""

from collections import defaultdict
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any

import pandas as pd
from message_ix import make_df
Expand Down Expand Up @@ -213,100 +213,15 @@ def add_infrastructure_techs(context: "Context"):
sub_time=sub_time,
)

result_dc = defaultdict(list)

for index, rows in df_elec.iterrows():
if rows["tec"] in techs:
if context.SDG != "baseline":
inp = make_df(
"input",
technology=rows["tec"],
value=rows["value_high"],
unit="-",
level="final",
commodity="electr",
mode="Mf",
time_origin="year",
node_loc=df_node["node"],
node_origin=df_node["region"],
).pipe(
broadcast,
map_yv_ya_lt(
year_wat,
# 1 because elec commodities don't have technical lifetime
1,
first_year,
),
time=sub_time,
)

result_dc["input"].append(inp)
else:
inp = make_df(
"input",
technology=rows["tec"],
value=rows["value_high"],
unit="-",
level="final",
commodity="electr",
mode="Mf",
time_origin="year",
node_loc=df_node["node"],
node_origin=df_node["region"],
).pipe(
broadcast,
map_yv_ya_lt(
year_wat,
# 1 because elec commodities don't have technical lifetime
1,
first_year,
),
time=sub_time,
)

inp = pd.concat(
[
inp,
make_df(
"input",
technology=rows["tec"],
value=rows["value_mid"],
unit="-",
level="final",
commodity="electr",
mode="M1",
time_origin="year",
node_loc=df_node["node"],
node_origin=df_node["region"],
).pipe(
broadcast,
# 1 because elec commodities don't have technical lifetime
map_yv_ya_lt(year_wat, 1, first_year),
time=sub_time,
),
]
)

result_dc["input"].append(inp)
else:
inp = make_df(
"input",
technology=rows["tec"],
value=rows["value_mid"],
unit="-",
level="final",
commodity="electr",
mode="M1",
time_origin="year",
node_loc=df_node["node"],
node_origin=df_node["region"],
).pipe(
broadcast,
map_yv_ya_lt(year_wat, 1, first_year),
time=sub_time,
)

result_dc["input"].append(inp)
result_dc = prepare_input_dataframe(
context=context,
sub_time=sub_time,
year_wat=year_wat,
first_year=first_year,
df_node=df_node,
techs=techs,
df_elec=df_elec,
)

results_new = {par_name: pd.concat(dfs) for par_name, dfs in result_dc.items()}

Expand Down Expand Up @@ -622,6 +537,112 @@ def add_infrastructure_techs(context: "Context"):
return results


def prepare_input_dataframe(
    context: "Context",
    sub_time,
    year_wat: tuple,
    first_year: int,
    df_node: pd.DataFrame,
    techs: list[str],
    df_elec: pd.DataFrame,
) -> defaultdict[Any, list]:
    """Build "input" parameter data frames for electricity use of water techs.

    For every row of *df_elec* one (or two) ``input`` data frames are created
    and collected under the ``"input"`` key of the returned dictionary:

    - technology not in *techs*: one frame with ``value_mid`` in mode ``M1``;
    - technology in *techs* and a non-baseline SDG scenario: one frame with
      ``value_high`` in mode ``Mf``;
    - technology in *techs* in the baseline scenario: both of the above,
      concatenated.

    Parameters
    ----------
    context :
        Water build context; only ``context.SDG`` is read.
    sub_time :
        Sub-annual time slices to broadcast over.
    year_wat : tuple
        Model years to broadcast over.
    first_year : int
        First model year passed to :func:`map_yv_ya_lt`.
    df_node : pd.DataFrame
        Node mapping with ``node`` and ``region`` columns.
    techs : list of str
        Technologies that receive SDG-dependent (``value_high``) treatment.
    df_elec : pd.DataFrame
        Electricity-input data with ``tec``, ``value_high`` and ``value_mid``
        columns.

    Returns
    -------
    defaultdict of list
        Mapping with a single key ``"input"`` whose value is the list of
        prepared data frames.
    """

    def _elec_input(rows: pd.Series, value_column: str, mode: str) -> pd.DataFrame:
        """Create one broadcast ``input`` frame for a single *df_elec* row."""
        return make_df(
            "input",
            technology=rows["tec"],
            value=rows[value_column],
            unit="-",
            level="final",
            commodity="electr",
            mode=mode,
            time_origin="year",
            node_loc=df_node["node"],
            node_origin=df_node["region"],
        ).pipe(
            broadcast,
            # lifetime 1 because elec commodities don't have technical lifetime
            map_yv_ya_lt(year_wat, 1, first_year),
            time=sub_time,
        )

    result_dc: defaultdict[Any, list] = defaultdict(list)

    for _, rows in df_elec.iterrows():
        if rows["tec"] not in techs:
            inp = _elec_input(rows, "value_mid", "M1")
        elif context.SDG != "baseline":
            inp = _elec_input(rows, "value_high", "Mf")
        else:
            # baseline: keep both the high-efficiency (Mf) and the
            # mid-efficiency (M1) modes available
            inp = pd.concat(
                [
                    _elec_input(rows, "value_high", "Mf"),
                    _elec_input(rows, "value_mid", "M1"),
                ]
            )

        result_dc["input"].append(inp)
    return result_dc


def add_desalination(context: "Context"):
"""Add desalination infrastructure
Two types of desalination are considered;
Expand Down
60 changes: 32 additions & 28 deletions message_ix_models/model/water/reporting.py
Original file line number Diff line number Diff line change
Expand Up @@ -1186,32 +1186,7 @@ def report(sc: Scenario, reg: str, sdgs: bool = False):
wp["unit"] = "US$2010/m3"
wp = wp.rename(columns={"node": "region"})
# get withdrawals for weighted mean
ww = report_iam.as_pandas()
ww = ww[
ww.variable.isin(
["out|final|rural_mw|rural_t_d|M1", "out|final|urban_mw|urban_t_d|M1"]
)
]
ww["commodity"] = np.where(
ww.variable.str.contains("urban_mw"), "urban_mw", "rural_mw"
)
ww["wdr"] = ww["value"]
if not suban:
ww = ww[["region", "year", "commodity", "wdr"]]
else:
ww = ww[["region", "year", "subannual", "commodity", "wdr"]]
ww = pd.concat(
[
ww,
(
ww.groupby(["region", "year", "commodity"])["wdr"]
.sum()
.reset_index()
.assign(subannual="year")
.loc[:, ["region", "year", "subannual", "commodity", "wdr"]]
),
]
).reset_index(drop=True)
ww = prepare_ww(ww_input=report_iam.as_pandas(), suban=suban)
# irrigation water, at regional level
# need to update for global model now we have 3 irrigation
# probably will need to do a scaled average with the ww, no basin level
Expand All @@ -1223,8 +1198,7 @@ def report(sc: Scenario, reg: str, sdgs: bool = False):
# drinking water
wr_dri = wp[wp.commodity.isin(["urban_mw", "rural_mw"])]
wr_dri = wr_dri.drop(columns={"level", "lvl", "mrg"})
if suban:
wr_dri = wr_dri.rename(columns={"time": "subannual"})
wr_dri = wr_dri.rename(columns={"time": "subannual"}) if suban else wr_dri
wr_dri = wr_dri.merge(ww, how="left")
wr_dri["variable"] = np.where(
wr_dri.commodity == "urban_mw",
Expand Down Expand Up @@ -1380,6 +1354,36 @@ def report(sc: Scenario, reg: str, sdgs: bool = False):
sc.commit("Reporting uploaded as timeseries")


def prepare_ww(ww_input: pd.DataFrame, suban: bool) -> pd.DataFrame:
    """Extract municipal water withdrawals used as weights for price means.

    Filters *ww_input* to the rural/urban transmission-and-distribution
    output variables, labels each row with its commodity, and appends
    per-(region, year, commodity) annual totals labelled
    ``subannual == "year"``.

    Parameters
    ----------
    ww_input : pd.DataFrame
        Reporting data with at least ``variable``, ``region``, ``year``,
        ``value`` columns and, when *suban* is true, ``subannual``.
    suban : bool
        Whether sub-annual time slices are present.

    Returns
    -------
    pd.DataFrame
        Withdrawals with columns ``region``, ``year``, ``commodity``,
        ``wdr`` (plus ``subannual``), including the appended annual totals.
    """
    # .copy() so the column assignments below act on an independent frame
    # instead of a slice of the caller's data (avoids SettingWithCopyWarning)
    ww = ww_input[
        ww_input.variable.isin(
            ["out|final|rural_mw|rural_t_d|M1", "out|final|urban_mw|urban_t_d|M1"]
        )
    ].copy()
    ww["commodity"] = np.where(
        ww.variable.str.contains("urban_mw"), "urban_mw", "rural_mw"
    )
    ww["wdr"] = ww["value"]
    if not suban:
        ww = ww[["region", "year", "commodity", "wdr"]]
    else:
        ww = ww[["region", "year", "subannual", "commodity", "wdr"]]
    # append annual totals per (region, year, commodity), tagged "year"
    ww = pd.concat(
        [
            ww,
            (
                ww.groupby(["region", "year", "commodity"])["wdr"]
                .sum()
                .reset_index()
                .assign(subannual="year")
                .loc[:, ["region", "year", "subannual", "commodity", "wdr"]]
            ),
        ]
    ).reset_index(drop=True)

    return ww


def report_full(sc: Scenario, reg: str, sdgs=False):
"""Combine old and new reporting workflows"""
a = sc.timeseries()
Expand Down

0 comments on commit c3701f5

Please sign in to comment.