Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add energy storage interface #567

Draft
wants to merge 35 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 21 commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
f59e9cb
add pypsa-eur fixes for add_extra_components
pz-max Jan 15, 2023
2cdbdc2
restructure code to read config from main
pz-max Jan 16, 2023
f87c389
add general energy storage interface in add_electricity
pz-max Jan 16, 2023
9823521
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 16, 2023
4d3ba14
add storage unit in add_extra_component
pz-max Jan 17, 2023
6b77699
merge X1 branch
pz-max Jan 17, 2023
9f1f30e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 17, 2023
9f479f0
rename and clean
pz-max Jan 17, 2023
141ec36
add generic attach store function
pz-max Jan 17, 2023
9e8d98c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 17, 2023
3519720
add generic storage function in solve_network
pz-max Jan 17, 2023
e1235dd
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 18, 2023
e7aa91d
add fixes
pz-max Jan 19, 2023
3bb0c26
Merge branch 'X1' of https://github.com/pz-max/pypsa-earth into X1
pz-max Jan 19, 2023
19fc88a
Merge branch 'main' into X1
pz-max Jan 24, 2023
bedee1c
add workflow structure
pz-max Jan 24, 2023
b4d1032
implement feature assessment
pz-max Jan 25, 2023
0436656
add changes and merge monte-carlo scripts
pz-max Jan 26, 2023
2362960
Merge branch 'main' into X1
pz-max Jan 26, 2023
b009908
Merge pull request #1 from pz-max/X2
pz-max Jan 26, 2023
897ed02
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 26, 2023
bdf606d
add reviewed changes
pz-max Jan 26, 2023
72eda79
add docstring
pz-max Jan 26, 2023
c317272
add TODO
pz-max Jan 26, 2023
ff28fd3
Merge branch 'main' into X1
pz-max Jan 26, 2023
14fe873
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 26, 2023
b8d4180
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Jan 31, 2023
6838d71
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Feb 3, 2023
17c30e5
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Feb 8, 2023
8e6d6d1
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 8, 2023
99cf8df
enable monte in scenario management
pz-max Feb 9, 2023
25ce05f
Merge branch 'X1' of https://github.com/pz-max/pypsa-earth into X1
pz-max Feb 9, 2023
322652f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 9, 2023
11656a9
Merge branch 'main' into X1
pz-max May 24, 2023
8840365
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 24, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 41 additions & 48 deletions Snakefile
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
from scripts._helpers import create_country_list
from scripts.add_electricity import get_load_paths_gegis
from scripts.retrieve_databundle_light import datafiles_retrivedatabundle
from scripts.monte_carlo import wildcard_creator

HTTP = HTTPRemoteProvider()

Expand All @@ -30,12 +31,12 @@ configfile: "configs/bundle_config.yaml"
# convert country list according to the desired region
config["countries"] = create_country_list(config["countries"])


# create a list of iteration steps, required to solve the experimental design
# each value is used as wildcard input e.g. solution_{unc}
config["scenario"]["unc"] = [
f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"])
]
if config["monte_carlo"].get("add_to_snakefile", False) == True:
config["scenario"]["unc"] = wildcard_creator(
config, config["monte_carlo"]["options"].get("method")
pz-max marked this conversation as resolved.
Show resolved Hide resolved
)

run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
Expand Down Expand Up @@ -633,41 +634,7 @@ def memory(w):
return int(factor * (10000 + 195 * int(w.clusters)))


if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False:

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
"logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 20
resources:
mem=memory,
shadow:
"shallow"
script:
"scripts/solve_network.py"


if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
if config["monte_carlo"].get("add_to_snakefile", False) == True:

rule monte_carlo:
input:
Expand All @@ -690,18 +657,10 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
script:
"scripts/monte_carlo.py"

rule solve_monte:
input:
expand(
"networks/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc",
**config["scenario"]
),

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc",
tech_costs=COSTS,
output:
"results/"
+ RDIR
Expand Down Expand Up @@ -741,6 +700,40 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
**config["scenario"]
),

else:
pz-max marked this conversation as resolved.
Show resolved Hide resolved

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
tech_costs=COSTS,
output:
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
"logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 20
resources:
mem=memory,
shadow:
"shallow"
script:
"scripts/solve_network.py"


def input_make_summary(w):
# It's mildly hacky to include the separate costs input as first entry
Expand Down
14 changes: 8 additions & 6 deletions config.default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -312,18 +312,20 @@ costs:


monte_carlo:
add_to_snakefile: true
options:
add_to_snakefile: false
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
method: "global_sensitivity" # or any_chance_store_test
pz-max marked this conversation as resolved.
Show resolved Hide resolved
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
pypsa_standard:
# User can add here flexibly more features for the Monte-Carlo sampling.
# Given as "key: value" format
# Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering!
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
loads_t.p_set: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
# Examples:
# loads_t.p_set: [0.9, 1.1]
# generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
stores.capital_cost: [0.8, 1.2] # for any_chance_store_test


solving:
Expand Down
14 changes: 8 additions & 6 deletions config.tutorial.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -308,18 +308,20 @@ costs:


monte_carlo:
add_to_snakefile: true
options:
add_to_snakefile: false
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
method: "global_sensitivity" # or any_chance_store_test
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
pypsa_standard:
# User can add here flexibly more features for the Monte-Carlo sampling.
# Given as "key: value" format
# Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering!
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
loads_t.p_set: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
# Examples:
# loads_t.p_set: [0.9, 1.1]
# generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
stores.capital_cost: [0.8, 1.2] # for any_chance_store_test


solving:
Expand Down
16 changes: 9 additions & 7 deletions doc/configtables/monte-carlo.csv
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
,Unit,Values,Description
options,,,
-- add_to_snakemake,,true or false,Add rule to Snakefile or not
-- samples,,"int** read description!", "Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number, that is not a quadrat of a prime, needs to be chosen. E.g. 7 not 9 (3^2)"
-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design
pypsa_standard,,,
-- <any pypsa.object syntax>,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object."
,Unit,Values,Description,,,
add_to_snakefile,,true or false,Add the monte-carlo rules to the Snakefile workflow or not,,,
options,,,,,,
-- method,,"""global_sensitivity"" or ""any_chance_store_test""","""global_sensitivity"" creates scenarios in latin hypercube space; ""any_chance_store_test"" creates scenarios where all values are pessimistic and only one is optimistic",,,
-- samples,,integer,"Defines the number of total sample networks that will be later optimized. If orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number that is not a square of a prime needs to be chosen. E.g. 7 not 9 (3^2)",,,
-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design,,,
pypsa_standard,,,,,,
-- <any pypsa.object syntax>,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object. Method ""any_chance_store_test"" only supports e.g. n.stores = [0.7,1.3]",,,
,,,,,,
82 changes: 82 additions & 0 deletions scripts/_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -610,6 +610,88 @@ def read_geojson(fn):
return gpd.GeoDataFrame(geometry=[])


def nested_storage_dict(tech_costs):
    """
    Create a nested dictionary with a storage index and meta data relation.

    Input:
    ------
    tech_costs: str, path to technology costs.csv file

    Output:
    -------
    nested_dict: dict, nested dictionary with storage index and meta data relation
    storage_techs: pd.Index, unique storage technology names found in the file

    Example:
    --------
    Data format in Input:
    costs['further description'][0] -> {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} with index 'Ni-Zn-bicharger'
    costs['further description'][1] -> {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} with index 'Ni-Zn-store'

    Output:
    .. code-block:: python
        {
        'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']}
        'Ni-Zn-store': {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']}
        ...
        }
    """
    import ast

    df = pd.read_csv(
        tech_costs,
        index_col=["technology"],
        usecols=["technology", "further description"],
    ).sort_index()
    # keep only rows whose free-text description embeds a storage meta dict
    df = df[df["further description"].str.contains("{'carrier':", na=False)]
    storage_techs = df.index.unique()
    # NOTE: renamed local (was `nested_storage_dict`) — it shadowed the function name
    meta_by_tech = {}
    if df.empty:
        print("No storage technology found in costs.csv")
    else:
        for tech, description in df["further description"].items():
            # safely parse the dict literal, see https://stackoverflow.com/a/988251/13573820
            storage_dict = ast.literal_eval(description)
            # the optional free-text 'note' entry is not meta data
            storage_dict.pop("note", None)
            meta_by_tech[tech] = storage_dict
    return [meta_by_tech, storage_techs]


def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs):
    """
    Add storage specific columns to costs.csv.

    Input:
    ------
    costs: pd.DataFrame, costs table indexed by technology
    storage_meta_dict: dict, nested dictionary with storage index and meta data relation
    storage_techs: list-like, unique storage technologies (index labels of `costs`)

    Output:
    -------
    costs: pd.DataFrame, costs with added 'carrier', 'technology_type' and 'type' columns
    """
    # attach the raw meta data lists to the storage rows
    for col in ["carrier", "technology_type", "type"]:
        costs.loc[storage_techs, col] = [
            storage_meta_dict[tech][col] for tech in costs.loc[storage_techs].index
        ]
    # collapse the list-valued cells to plain strings; 'elec' entries in the
    # carrier list only mark the electricity side of the storage and are dropped
    for tech in storage_techs:
        costs.at[tech, "carrier"] = "".join(
            e for e in costs.at[tech, "carrier"] if e != "elec"
        )
        costs.at[tech, "technology_type"] = "".join(costs.at[tech, "technology_type"])
        costs.at[tech, "type"] = "".join(costs.at[tech, "type"])
    return costs


def create_country_list(input, iso_coding=True):
"""
Create a country list for defined regions in config_osm_data.py
Expand Down
43 changes: 36 additions & 7 deletions scripts/add_electricity.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,13 @@
import powerplantmatching as pm
import pypsa
import xarray as xr
from _helpers import configure_logging, getContinent, update_p_nom_max
from _helpers import (
add_storage_col_to_costs,
configure_logging,
getContinent,
nested_storage_dict,
update_p_nom_max,
)
from shapely.validation import make_valid
from vresutils import transfer as vtransfer

Expand Down Expand Up @@ -155,6 +161,7 @@ def load_costs(tech_costs, config, elec_config, Nyears=1):

costs = costs.value.unstack().fillna(config["fill_values"])

# set marginal cost to all components generators, charger, discharger, store, ...
costs["capital_cost"] = (
(
calculate_annuity(costs["lifetime"], costs["discount rate"])
Expand Down Expand Up @@ -187,27 +194,49 @@ def load_costs(tech_costs, config, elec_config, Nyears=1):
+ (1 - config["rooftop_share"]) * costs.at["solar-utility", "capital_cost"]
)

def costs_for_storage(store, link1, link2=None, max_hours=1.0):
capital_cost = link1["capital_cost"] + max_hours * store["capital_cost"]
if link2 is not None:
capital_cost += link2["capital_cost"]
def costs_for_storageunit(store, link1, link2=None, max_hours=1.0):
    """
    Combine store and (dis)charger cost rows into one storage-unit cost entry.

    Parameters
    ----------
    store : pd.Series / pd.DataFrame row, provides 'capital_cost' (per MWh)
    link1 : pd.Series / pd.DataFrame row, charger or bicharger 'capital_cost' (per MW)
    link2 : pd.Series / pd.DataFrame row, optional separate discharger costs;
        ignored when omitted or empty. Default changed from the mutable
        ``pd.DataFrame()`` (evaluated once at definition time) to ``None``.
    max_hours : float, energy-to-power ratio of the storage unit

    Returns
    -------
    pd.Series with 'capital_cost', 'marginal_cost' and 'co2_emissions'
    """
    capital_cost = float(link1["capital_cost"]) + float(max_hours) * float(
        store["capital_cost"]
    )
    # a separate discharger only exists for asymmetric storage (e.g. H2:
    # fuel cell for discharging, electrolysis for charging)
    if link2 is not None and not link2.empty:
        capital_cost += float(link2["capital_cost"])
    return pd.Series(
        dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0)
    )

max_hours = elec_config["max_hours"]
costs.loc["battery"] = costs_for_storage(
costs.loc["battery"] = costs_for_storageunit(
costs.loc["battery storage"],
costs.loc["battery inverter"],
max_hours=max_hours["battery"],
)
costs.loc["H2"] = costs_for_storage(
costs.loc["H2"] = costs_for_storageunit(
costs.loc["hydrogen storage tank"],
costs.loc["fuel cell"],
costs.loc["electrolysis"],
max_hours=max_hours["H2"],
)

storage_meta_dict, storage_techs = nested_storage_dict(tech_costs)
costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs)

# add capital_cost to all storage_units indexed by carrier e.g. "lead" or "concrete"
for c in costs.loc[storage_techs, "carrier"].unique():
carrier = costs.carrier
tech_type = costs.technology_type
store_filter = (carrier == c) & (tech_type == "store")
charger_or_bicharger_filter = (carrier == c) & (
(tech_type == "bicharger") | (tech_type == "charger")
)
discharger_filter = (carrier == c) & (tech_type == "discharger")
costs.loc[c] = costs_for_storageunit(
costs.loc[store_filter],
costs.loc[charger_or_bicharger_filter],
costs.loc[discharger_filter],
max_hours=max_hours["battery"],
# TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible)
)

for attr in ("marginal_cost", "capital_cost"):
overwrites = config.get(attr)
if overwrites is not None:
Expand Down
Loading