From f59e9cbc6e5569033e2fd88be23900b6a5f3b9b6 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Sun, 15 Jan 2023 21:39:09 +0000 Subject: [PATCH 01/24] add pypsa-eur fixes for add_extra_components --- scripts/add_extra_components.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 5301db830..3e830f654 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -82,6 +82,7 @@ def attach_storageunits(n, costs): lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} for carrier in carriers: + roundtrip_correction = 0.5 if carrier == "battery" else 1 n.madd( "StorageUnit", buses_i, @@ -91,8 +92,10 @@ def attach_storageunits(n, costs): p_nom_extendable=True, capital_cost=costs.at[carrier, "capital_cost"], marginal_cost=costs.at[carrier, "marginal_cost"], - efficiency_store=costs.at[lookup_store[carrier], "efficiency"], - efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"], + efficiency_store=costs.at[lookup_store[carrier], "efficiency"] + ** roundtrip_correction, + efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"] + ** roundtrip_correction, max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) @@ -140,6 +143,7 @@ def attach_stores(n, costs): carrier="H2 fuel cell", p_nom_extendable=True, efficiency=costs.at["fuel cell", "efficiency"], + # NB: fixed cost is per MWel, so we need to divide by efficiency capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], marginal_cost=costs.at["fuel cell", "marginal_cost"], @@ -167,7 +171,8 @@ def attach_stores(n, costs): bus0=buses_i, bus1=b_buses_i, carrier="battery charger", - efficiency=costs.at["battery inverter", "efficiency"], + # NB: the efficiencies are "round trip efficiencies" + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, capital_cost=costs.at["battery inverter", "capital_cost"], 
p_nom_extendable=True, marginal_cost=costs.at["battery inverter", "marginal_cost"], @@ -179,7 +184,8 @@ def attach_stores(n, costs): bus0=b_buses_i, bus1=buses_i, carrier="battery discharger", - efficiency=costs.at["battery inverter", "efficiency"], + # NB: the efficiencies are "round trip efficiencies" + efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, p_nom_extendable=True, marginal_cost=costs.at["battery inverter", "marginal_cost"], ) From 2cdbdc2800665a4c895c8a60074910b863abea84 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Mon, 16 Jan 2023 11:46:51 +0000 Subject: [PATCH 02/24] restructure code to read config from main --- scripts/add_extra_components.py | 52 +++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 3e830f654..f901cc293 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -69,10 +69,9 @@ logger = logging.getLogger(__name__) -def attach_storageunits(n, costs): - elec_opts = snakemake.config["electricity"] - carriers = elec_opts["extendable_carriers"]["StorageUnit"] - max_hours = elec_opts["max_hours"] +def attach_storageunits(n, costs, config): + carriers = config["electricity"]["extendable_carriers"]["StorageUnit"] + max_hours = config["electricity"]["max_hours"] _add_missing_carriers_from_costs(n, costs, carriers) @@ -99,17 +98,28 @@ def attach_storageunits(n, costs): max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) + + ### TODO: GENERAL STORAGE_UNIT ATTACH FUNCTION -def attach_stores(n, costs): - elec_opts = snakemake.config["electricity"] - carriers = elec_opts["extendable_carriers"]["Store"] +def attach_stores(n, costs, config): + """ + Add storage units as store and links. + + Two types of storage exists: + 1. where every components can be independently sized e.g. hydrogen + 2. where charger and discharger are the same component e.g. 
Li-battery (inverter) + """ + carriers = config["electricity"]["extendable_carriers"]["Store"] _add_missing_carriers_from_costs(n, costs, carriers) buses_i = n.buses.index bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} + ### TODO: GENERAL STORE ATTACH FUNCTION + # Combine bicharger model & traditional model? (or even more general e.g. 5 step charger) + if "H2" in carriers: h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) @@ -143,9 +153,7 @@ def attach_stores(n, costs): carrier="H2 fuel cell", p_nom_extendable=True, efficiency=costs.at["fuel cell", "efficiency"], - # NB: fixed cost is per MWel, so we need to divide by efficiency - capital_cost=costs.at["fuel cell", "capital_cost"] - * costs.at["fuel cell", "efficiency"], + capital_cost=costs.at["fuel cell", "capital_cost"], marginal_cost=costs.at["fuel cell", "marginal_cost"], ) @@ -172,7 +180,7 @@ def attach_stores(n, costs): bus1=b_buses_i, carrier="battery charger", # NB: the efficiencies are "round trip efficiencies" - efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + efficiency=costs.at["battery inverter", "efficiency"], capital_cost=costs.at["battery inverter", "capital_cost"], p_nom_extendable=True, marginal_cost=costs.at["battery inverter", "marginal_cost"], @@ -185,15 +193,14 @@ def attach_stores(n, costs): bus1=buses_i, carrier="battery discharger", # NB: the efficiencies are "round trip efficiencies" - efficiency=costs.at["battery inverter", "efficiency"] ** 0.5, + efficiency=costs.at["battery inverter", "efficiency"], p_nom_extendable=True, marginal_cost=costs.at["battery inverter", "marginal_cost"], ) -def attach_hydrogen_pipelines(n, costs): - elec_opts = snakemake.config["electricity"] - ext_carriers = elec_opts["extendable_carriers"] +def attach_hydrogen_pipelines(n, costs, config): + ext_carriers = config["electricity"]["extendable_carriers"] as_stores = ext_carriers.get("Store", []) if "H2 pipeline" not in ext_carriers.get("Link", []): @@ 
-242,18 +249,19 @@ def attach_hydrogen_pipelines(n, costs): n = pypsa.Network(snakemake.input.network) Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + config = snakemake.config costs = load_costs( snakemake.input.tech_costs, - snakemake.config["costs"], - snakemake.config["electricity"], + config["costs"], + config["electricity"], Nyears, ) - attach_storageunits(n, costs) - attach_stores(n, costs) - attach_hydrogen_pipelines(n, costs) + attach_storageunits(n, costs, config) + attach_stores(n, costs, config) + attach_hydrogen_pipelines(n, costs, config) - add_nice_carrier_names(n, config=snakemake.config) + add_nice_carrier_names(n, config=config) - n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) + n.meta = dict(config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) From f87c389618001f49405cc4c0b691c93572a9d729 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Mon, 16 Jan 2023 23:03:19 +0000 Subject: [PATCH 03/24] add general energy storage interface in add_electricity --- scripts/_helpers.py | 68 ++++++++++++++++++++++++++++++++++++++ scripts/add_electricity.py | 28 ++++++++++++---- 2 files changed, 89 insertions(+), 7 deletions(-) diff --git a/scripts/_helpers.py b/scripts/_helpers.py index e210168b9..0add771e7 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -605,3 +605,71 @@ def read_geojson(fn): else: # else return an empty GeoDataFrame return gpd.GeoDataFrame(geometry=[]) + + +def nested_storage_dict(tech_costs): + """ + Create a nested dictionary with a storage index and meta data relation. 
+ + Input: + ------ + tech_costs: str, path to technology costs.csv file + + Output: + ------- + nested_dict: dict, nested dictionary with storage index and meta data relation + storage_techs: list, list of unique storage technologies + + Example: + -------- + Data format in Input: + costs['further description'][0] -> {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} with index 'Ni-Zn-bicharger' + costs['further description'][1] -> {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} with index 'Ni-Zn-store' + + Output: + .. code-block:: python + { + 'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} + 'Ni-Zn-store': {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} + ... + } + """ + import ast + + df = pd.read_csv(tech_costs, index_col=["technology"], usecols=["technology","further description"]).sort_index() + df = df[df['further description'].str.contains("{'carrier':", na = False)] + storage_techs = df.index.unique() + nested_storage_dict = {} + if df.empty: + print("No storage technology found in costs.csv") + else: + for i in range(len(df)): + storage_dict = ast.literal_eval(df.iloc[i,0]) # https://stackoverflow.com/a/988251/13573820 + _ = storage_dict.pop('note', None) + nested_storage_dict[df.index[i]] = storage_dict + return [nested_storage_dict, storage_techs] + + +def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs): + """ + Add storage specific columns to costs.csv. 
+ + Input: + ------ + costs: pd.DataFrame, costs.csv + storage_meta_dict: dict, nested dictionary with storage index and meta data relation + storage_techs: list, list of unique storage technologies + + Output: + ------- + costs: pd.DataFrame, costs.csv with added storage specific columns + """ + # add storage specific columns to costs.csv + for c in ["carrier", "technology_type", "type"]: + costs.loc[storage_techs, c] = [storage_meta_dict[X][c] for X in costs.loc[storage_techs].index] + # remove all 'elec's from carrier columns and read carrier as string + for i in range(len(costs.loc[storage_techs])): + costs.loc[storage_techs[i], "carrier"] = ''.join([e for e in costs.loc[storage_techs].carrier.iloc[i] if e!="elec"]) + costs.loc[storage_techs[i], "technology_type"] = ''.join(costs.loc[storage_techs].technology_type.iloc[i]) + costs.loc[storage_techs[i], "type"] = ''.join(costs.loc[storage_techs].type.iloc[i]) + return costs diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 92dfc590c..b0c1a00cf 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -100,7 +100,7 @@ import powerplantmatching as pm import pypsa import xarray as xr -from _helpers import configure_logging, getContinent, update_p_nom_max +from _helpers import configure_logging, getContinent, update_p_nom_max, add_storage_col_to_costs, nested_storage_dict from shapely.validation import make_valid from vresutils import transfer as vtransfer @@ -187,27 +187,41 @@ def load_costs(tech_costs, config, elec_config, Nyears=1): + (1 - config["rooftop_share"]) * costs.at["solar-utility", "capital_cost"] ) - def costs_for_storage(store, link1, link2=None, max_hours=1.0): - capital_cost = link1["capital_cost"] + max_hours * store["capital_cost"] - if link2 is not None: - capital_cost += link2["capital_cost"] + def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): + capital_cost = float(link1["capital_cost"]) + float(max_hours) * 
float(store["capital_cost"]) + if not link2.empty: + capital_cost += float(link2["capital_cost"]) return pd.Series( dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0) ) max_hours = elec_config["max_hours"] - costs.loc["battery"] = costs_for_storage( + costs.loc["battery"] = costs_for_storageunit( costs.loc["battery storage"], costs.loc["battery inverter"], max_hours=max_hours["battery"], ) - costs.loc["H2"] = costs_for_storage( + costs.loc["H2"] = costs_for_storageunit( costs.loc["hydrogen storage tank"], costs.loc["fuel cell"], costs.loc["electrolysis"], max_hours=max_hours["H2"], ) + storage_meta_dict, storage_techs = nested_storage_dict(tech_costs) + costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) + + # add capital_cost to all storage units + for c in costs.loc[storage_techs,"carrier"].unique(): + carrier = costs.carrier + tech_type = costs.technology_type + costs.loc[c] = costs_for_storageunit( + costs.loc[(carrier==c) & (tech_type=="store")], + costs.loc[(carrier==c) & ((tech_type=="bicharger") | (tech_type=="charger"))], + costs.loc[(carrier==c) & (tech_type=="discharger")], + max_hours=max_hours["battery"], # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) + ) + for attr in ("marginal_cost", "capital_cost"): overwrites = config.get(attr) if overwrites is not None: From 9823521242d36f8af24eda3ccc938326ce43d952 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 23:17:31 +0000 Subject: [PATCH 04/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/_helpers.py | 38 ++++++++++++++++++++++----------- scripts/add_electricity.py | 28 +++++++++++++++++------- scripts/add_extra_components.py | 2 +- 3 files changed, 47 insertions(+), 21 deletions(-) diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 0add771e7..01eed01f4 100644 --- 
a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -610,7 +610,7 @@ def read_geojson(fn): def nested_storage_dict(tech_costs): """ Create a nested dictionary with a storage index and meta data relation. - + Input: ------ tech_costs: str, path to technology costs.csv file @@ -619,7 +619,7 @@ def nested_storage_dict(tech_costs): ------- nested_dict: dict, nested dictionary with storage index and meta data relation storage_techs: list, list of unique storage technologies - + Example: -------- Data format in Input: @@ -636,16 +636,22 @@ def nested_storage_dict(tech_costs): """ import ast - df = pd.read_csv(tech_costs, index_col=["technology"], usecols=["technology","further description"]).sort_index() - df = df[df['further description'].str.contains("{'carrier':", na = False)] + df = pd.read_csv( + tech_costs, + index_col=["technology"], + usecols=["technology", "further description"], + ).sort_index() + df = df[df["further description"].str.contains("{'carrier':", na=False)] storage_techs = df.index.unique() nested_storage_dict = {} if df.empty: print("No storage technology found in costs.csv") else: for i in range(len(df)): - storage_dict = ast.literal_eval(df.iloc[i,0]) # https://stackoverflow.com/a/988251/13573820 - _ = storage_dict.pop('note', None) + storage_dict = ast.literal_eval( + df.iloc[i, 0] + ) # https://stackoverflow.com/a/988251/13573820 + _ = storage_dict.pop("note", None) nested_storage_dict[df.index[i]] = storage_dict return [nested_storage_dict, storage_techs] @@ -653,23 +659,31 @@ def nested_storage_dict(tech_costs): def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs): """ Add storage specific columns to costs.csv. 
- + Input: ------ costs: pd.DataFrame, costs.csv storage_meta_dict: dict, nested dictionary with storage index and meta data relation storage_techs: list, list of unique storage technologies - + Output: ------- costs: pd.DataFrame, costs.csv with added storage specific columns """ # add storage specific columns to costs.csv for c in ["carrier", "technology_type", "type"]: - costs.loc[storage_techs, c] = [storage_meta_dict[X][c] for X in costs.loc[storage_techs].index] + costs.loc[storage_techs, c] = [ + storage_meta_dict[X][c] for X in costs.loc[storage_techs].index + ] # remove all 'elec's from carrier columns and read carrier as string for i in range(len(costs.loc[storage_techs])): - costs.loc[storage_techs[i], "carrier"] = ''.join([e for e in costs.loc[storage_techs].carrier.iloc[i] if e!="elec"]) - costs.loc[storage_techs[i], "technology_type"] = ''.join(costs.loc[storage_techs].technology_type.iloc[i]) - costs.loc[storage_techs[i], "type"] = ''.join(costs.loc[storage_techs].type.iloc[i]) + costs.loc[storage_techs[i], "carrier"] = "".join( + [e for e in costs.loc[storage_techs].carrier.iloc[i] if e != "elec"] + ) + costs.loc[storage_techs[i], "technology_type"] = "".join( + costs.loc[storage_techs].technology_type.iloc[i] + ) + costs.loc[storage_techs[i], "type"] = "".join( + costs.loc[storage_techs].type.iloc[i] + ) return costs diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index b0c1a00cf..e5c43e88d 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -100,7 +100,13 @@ import powerplantmatching as pm import pypsa import xarray as xr -from _helpers import configure_logging, getContinent, update_p_nom_max, add_storage_col_to_costs, nested_storage_dict +from _helpers import ( + add_storage_col_to_costs, + configure_logging, + getContinent, + nested_storage_dict, + update_p_nom_max, +) from shapely.validation import make_valid from vresutils import transfer as vtransfer @@ -188,7 +194,9 @@ def load_costs(tech_costs, 
config, elec_config, Nyears=1): ) def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): - capital_cost = float(link1["capital_cost"]) + float(max_hours) * float(store["capital_cost"]) + capital_cost = float(link1["capital_cost"]) + float(max_hours) * float( + store["capital_cost"] + ) if not link2.empty: capital_cost += float(link2["capital_cost"]) return pd.Series( @@ -212,15 +220,19 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) # add capital_cost to all storage units - for c in costs.loc[storage_techs,"carrier"].unique(): + for c in costs.loc[storage_techs, "carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type costs.loc[c] = costs_for_storageunit( - costs.loc[(carrier==c) & (tech_type=="store")], - costs.loc[(carrier==c) & ((tech_type=="bicharger") | (tech_type=="charger"))], - costs.loc[(carrier==c) & (tech_type=="discharger")], - max_hours=max_hours["battery"], # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) - ) + costs.loc[(carrier == c) & (tech_type == "store")], + costs.loc[ + (carrier == c) & ((tech_type == "bicharger") | (tech_type == "charger")) + ], + costs.loc[(carrier == c) & (tech_type == "discharger")], + max_hours=max_hours[ + "battery" + ], # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) + ) for attr in ("marginal_cost", "capital_cost"): overwrites = config.get(attr) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index f901cc293..f30bb2f7a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -98,7 +98,7 @@ def attach_storageunits(n, costs, config): max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) - + ### TODO: GENERAL STORAGE_UNIT ATTACH FUNCTION From 4d3ba1495d91c4801ff7592d6f9404388545a3ba Mon Sep 17 00:00:00 2001 From: Max Parzen 
Date: Tue, 17 Jan 2023 21:13:17 +0000 Subject: [PATCH 05/24] add storage unit in add_extra_component --- scripts/add_electricity.py | 2 +- scripts/add_extra_components.py | 23 +++++++++++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index b0c1a00cf..cad39d1cd 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -211,7 +211,7 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): storage_meta_dict, storage_techs = nested_storage_dict(tech_costs) costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) - # add capital_cost to all storage units + # add capital_cost to all storage units indexed by carrier e.g. "lead" or "concrete" for c in costs.loc[storage_techs,"carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index f901cc293..879655628 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -77,10 +77,12 @@ def attach_storageunits(n, costs, config): buses_i = n.buses.index + carriers_classic = [x for x in carriers if x=="H2" or x=="battery"] + carriers_database = [x for x in carriers if x!="H2" and x!="battery"] lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} - for carrier in carriers: + for carrier in carriers_classic: roundtrip_correction = 0.5 if carrier == "battery" else 1 n.madd( "StorageUnit", @@ -99,7 +101,24 @@ def attach_storageunits(n, costs, config): cyclic_state_of_charge=True, ) - ### TODO: GENERAL STORAGE_UNIT ATTACH FUNCTION + for carrier in carriers_database: + tech_type = costs.technology_type + charge_or_bicharge_filter = (costs.carrier==carrier) & ((tech_type=="charger") | (tech_type=="bicharger")) + charge_or_bicharge_filter = (costs.carrier==carrier) & 
((tech_type=="discharger") | (tech_type=="bicharger")) + n.madd( + "StorageUnit", + buses_i, + " " + carrier, + bus=buses_i, + carrier=carrier, + p_nom_extendable=True, + capital_cost=costs.at[carrier, "capital_cost"], + marginal_cost=costs.at[carrier, "marginal_cost"], + efficiency_store=float(costs.loc[charge_or_bicharge_filter, "efficiency"]), + efficiency_dispatch=float(costs.loc[charge_or_bicharge_filter, "efficiency"]), + max_hours=max_hours[carrier], + cyclic_state_of_charge=True, + ) def attach_stores(n, costs, config): From 9f1f30e6803cb189f15264d12838bc510f12657e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 21:18:04 +0000 Subject: [PATCH 06/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/add_electricity.py | 2 +- scripts/add_extra_components.py | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 0ff6bbd10..ba1270aa5 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -220,7 +220,7 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) # add capital_cost to all storage units indexed by carrier e.g. 
"lead" or "concrete" - for c in costs.loc[storage_techs,"carrier"].unique(): + for c in costs.loc[storage_techs, "carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type costs.loc[c] = costs_for_storageunit( diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index d02069d90..2d6dcd2b2 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -77,8 +77,8 @@ def attach_storageunits(n, costs, config): buses_i = n.buses.index - carriers_classic = [x for x in carriers if x=="H2" or x=="battery"] - carriers_database = [x for x in carriers if x!="H2" and x!="battery"] + carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] + carriers_database = [x for x in carriers if x != "H2" and x != "battery"] lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} @@ -100,11 +100,15 @@ def attach_storageunits(n, costs, config): max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) - + for carrier in carriers_database: tech_type = costs.technology_type - charge_or_bicharge_filter = (costs.carrier==carrier) & ((tech_type=="charger") | (tech_type=="bicharger")) - charge_or_bicharge_filter = (costs.carrier==carrier) & ((tech_type=="discharger") | (tech_type=="bicharger")) + charge_or_bicharge_filter = (costs.carrier == carrier) & ( + (tech_type == "charger") | (tech_type == "bicharger") + ) + charge_or_bicharge_filter = (costs.carrier == carrier) & ( + (tech_type == "discharger") | (tech_type == "bicharger") + ) n.madd( "StorageUnit", buses_i, @@ -115,7 +119,9 @@ def attach_storageunits(n, costs, config): capital_cost=costs.at[carrier, "capital_cost"], marginal_cost=costs.at[carrier, "marginal_cost"], efficiency_store=float(costs.loc[charge_or_bicharge_filter, "efficiency"]), - efficiency_dispatch=float(costs.loc[charge_or_bicharge_filter, "efficiency"]), + efficiency_dispatch=float( + 
costs.loc[charge_or_bicharge_filter, "efficiency"] + ), max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) From 9f479f0674388bf5c308cc842153422386d920f9 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 17 Jan 2023 21:30:24 +0000 Subject: [PATCH 07/24] rename and clean --- scripts/add_electricity.py | 18 +++++++++--------- scripts/add_extra_components.py | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index ba1270aa5..2b167d8fc 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -220,18 +220,18 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) # add capital_cost to all storage units indexed by carrier e.g. "lead" or "concrete" - for c in costs.loc[storage_techs, "carrier"].unique(): + for c in costs.loc[storage_techs,"carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type + store_filter = (carrier == c) & (tech_type == "store") + charger_or_bicharger_filter = (carrier == c) & ((tech_type == "bicharger") | (tech_type == "charger")) + discharger_filter = (carrier == c) & (tech_type == "discharger") costs.loc[c] = costs_for_storageunit( - costs.loc[(carrier == c) & (tech_type == "store")], - costs.loc[ - (carrier == c) & ((tech_type == "bicharger") | (tech_type == "charger")) - ], - costs.loc[(carrier == c) & (tech_type == "discharger")], - max_hours=max_hours[ - "battery" - ], # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) + costs.loc[store_filter], + costs.loc[charger_or_bicharger_filter], + costs.loc[discharger_filter], + max_hours=max_hours["battery"], + # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) ) for attr in ("marginal_cost", "capital_cost"): diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 
2d6dcd2b2..b2495fcb9 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -103,10 +103,10 @@ def attach_storageunits(n, costs, config): for carrier in carriers_database: tech_type = costs.technology_type - charge_or_bicharge_filter = (costs.carrier == carrier) & ( + charger_or_bicharger_filter = (costs.carrier == carrier) & ( (tech_type == "charger") | (tech_type == "bicharger") ) - charge_or_bicharge_filter = (costs.carrier == carrier) & ( + discharger_or_bicharger_filter = (costs.carrier == carrier) & ( (tech_type == "discharger") | (tech_type == "bicharger") ) n.madd( @@ -118,9 +118,9 @@ def attach_storageunits(n, costs, config): p_nom_extendable=True, capital_cost=costs.at[carrier, "capital_cost"], marginal_cost=costs.at[carrier, "marginal_cost"], - efficiency_store=float(costs.loc[charge_or_bicharge_filter, "efficiency"]), + efficiency_store=float(costs.loc[charger_or_bicharger_filter, "efficiency"]), efficiency_dispatch=float( - costs.loc[charge_or_bicharge_filter, "efficiency"] + costs.loc[discharger_or_bicharger_filter, "efficiency"] ), max_hours=max_hours[carrier], cyclic_state_of_charge=True, From 141ec363f53b65d6001608ad0fa428b8fe8dee08 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 17 Jan 2023 22:57:26 +0000 Subject: [PATCH 08/24] add generic attach store function --- scripts/add_extra_components.py | 63 +++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index b2495fcb9..0192f1be3 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -71,14 +71,14 @@ def attach_storageunits(n, costs, config): carriers = config["electricity"]["extendable_carriers"]["StorageUnit"] + carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] + carriers_database = [x for x in carriers if x != "H2" and x != "battery"] max_hours = config["electricity"]["max_hours"] 
_add_missing_carriers_from_costs(n, costs, carriers) buses_i = n.buses.index - carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] - carriers_database = [x for x in carriers if x != "H2" and x != "battery"] lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} @@ -126,8 +126,6 @@ def attach_storageunits(n, costs, config): cyclic_state_of_charge=True, ) - ### TODO: GENERAL STORAGE_UNIT ATTACH FUNCTION - def attach_stores(n, costs, config): """ @@ -138,7 +136,8 @@ def attach_stores(n, costs, config): 2. where charger and discharger are the same component e.g. Li-battery (inverter) """ carriers = config["electricity"]["extendable_carriers"]["Store"] - + carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] + carriers_database = [x for x in carriers if x != "H2" and x != "battery"] _add_missing_carriers_from_costs(n, costs, carriers) buses_i = n.buses.index @@ -147,7 +146,7 @@ def attach_stores(n, costs, config): ### TODO: GENERAL STORE ATTACH FUNCTION # Combine bicharger model & traditional model? (or even more general e.g. 
5 step charger) - if "H2" in carriers: + if "H2" in carriers_classic: h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) n.madd( @@ -184,7 +183,7 @@ def attach_stores(n, costs, config): marginal_cost=costs.at["fuel cell", "marginal_cost"], ) - if "battery" in carriers: + if "battery" in carriers_classic: b_buses_i = n.madd( "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict ) @@ -225,6 +224,56 @@ def attach_stores(n, costs, config): marginal_cost=costs.at["battery inverter", "marginal_cost"], ) + for c in carriers_database: + carrier_buses_i = n.madd("Bus", buses_i + f" {c}", carrier=c, **bus_sub_dict) + tech_type = costs.technology_type + carrier = costs.carrier + store_filter = (carrier == c) & (tech_type == "store") + charger_or_bicharger_filter = (carrier == c) & ( + (tech_type == "charger") | (tech_type == "bicharger") + ) + discharger_or_bicharger_filter = (carrier == c) & ( + (tech_type == "discharger") | (tech_type == "bicharger") + ) + n.madd( + "Store", + carrier_buses_i, + bus=carrier_buses_i, + carrier=c, + e_nom_extendable=True, + e_cyclic=True, + capital_cost=float(costs.loc[store_filter, "capital_cost"]), + ) + + n.madd( + "Link", + carrier_buses_i + " charger", + bus0=buses_i, + bus1=carrier_buses_i, + carrier=f"{c} charger", + p_nom_extendable=True, + efficiency=float(costs.loc[charger_or_bicharger_filter, "efficiency"]), + capital_cost=float(costs.loc[charger_or_bicharger_filter, "capital_cost"]), + marginal_cost=float(costs.loc[charger_or_bicharger_filter, "marginal_cost"]), + ) + + # capital cost of discharger is None if bicharger since already added in previous link + if costs.loc[discharger_or_bicharger_filter].technology_type.item() == "bicharger": + none_or_value = None + else: + none_or_value = float(costs.loc[discharger_or_bicharger_filter, "capital_cost"]) + n.madd( + "Link", + carrier_buses_i + " discharger", + bus0=carrier_buses_i, + bus1=buses_i, + carrier=f"{c} discharger", + p_nom_extendable=True, + 
efficiency=float(costs.loc[discharger_or_bicharger_filter, "efficiency"]), + capital_cost=none_or_value, + marginal_cost=float(costs.loc[discharger_or_bicharger_filter, "marginal_cost"]), + ) + def attach_hydrogen_pipelines(n, costs, config): ext_carriers = config["electricity"]["extendable_carriers"] From 9e8d98c66c467aca902d1710eaa44d32019e298d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 22:57:43 +0000 Subject: [PATCH 09/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/add_electricity.py | 8 +++++--- scripts/add_extra_components.py | 21 ++++++++++++++++----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 2b167d8fc..374a620db 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -220,17 +220,19 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) # add capital_cost to all storage units indexed by carrier e.g. 
"lead" or "concrete" - for c in costs.loc[storage_techs,"carrier"].unique(): + for c in costs.loc[storage_techs, "carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type store_filter = (carrier == c) & (tech_type == "store") - charger_or_bicharger_filter = (carrier == c) & ((tech_type == "bicharger") | (tech_type == "charger")) + charger_or_bicharger_filter = (carrier == c) & ( + (tech_type == "bicharger") | (tech_type == "charger") + ) discharger_filter = (carrier == c) & (tech_type == "discharger") costs.loc[c] = costs_for_storageunit( costs.loc[store_filter], costs.loc[charger_or_bicharger_filter], costs.loc[discharger_filter], - max_hours=max_hours["battery"], + max_hours=max_hours["battery"], # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) ) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 0192f1be3..db349f0b8 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -118,7 +118,9 @@ def attach_storageunits(n, costs, config): p_nom_extendable=True, capital_cost=costs.at[carrier, "capital_cost"], marginal_cost=costs.at[carrier, "marginal_cost"], - efficiency_store=float(costs.loc[charger_or_bicharger_filter, "efficiency"]), + efficiency_store=float( + costs.loc[charger_or_bicharger_filter, "efficiency"] + ), efficiency_dispatch=float( costs.loc[discharger_or_bicharger_filter, "efficiency"] ), @@ -254,14 +256,21 @@ def attach_stores(n, costs, config): p_nom_extendable=True, efficiency=float(costs.loc[charger_or_bicharger_filter, "efficiency"]), capital_cost=float(costs.loc[charger_or_bicharger_filter, "capital_cost"]), - marginal_cost=float(costs.loc[charger_or_bicharger_filter, "marginal_cost"]), + marginal_cost=float( + costs.loc[charger_or_bicharger_filter, "marginal_cost"] + ), ) # capital cost of discharger is None if bicharger since already added in previous link - if costs.loc[discharger_or_bicharger_filter].technology_type.item() 
== "bicharger": + if ( + costs.loc[discharger_or_bicharger_filter].technology_type.item() + == "bicharger" + ): none_or_value = None else: - none_or_value = float(costs.loc[discharger_or_bicharger_filter, "capital_cost"]) + none_or_value = float( + costs.loc[discharger_or_bicharger_filter, "capital_cost"] + ) n.madd( "Link", carrier_buses_i + " discharger", @@ -271,7 +280,9 @@ def attach_stores(n, costs, config): p_nom_extendable=True, efficiency=float(costs.loc[discharger_or_bicharger_filter, "efficiency"]), capital_cost=none_or_value, - marginal_cost=float(costs.loc[discharger_or_bicharger_filter, "marginal_cost"]), + marginal_cost=float( + costs.loc[discharger_or_bicharger_filter, "marginal_cost"] + ), ) From 3519720b18bf68c8d7a01a596632b7133e0273ca Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 17 Jan 2023 23:59:59 +0000 Subject: [PATCH 10/24] add generic storage function in solve_network --- Snakefile | 2 ++ scripts/solve_network.py | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/Snakefile b/Snakefile index dbaec4782..87bd0eaa2 100644 --- a/Snakefile +++ b/Snakefile @@ -576,6 +576,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False: rule solve_network: input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + tech_costs=COSTS, output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", log: @@ -622,6 +623,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: rule solve_network: input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", + tech_costs=COSTS, output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", log: diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 953d2f0ae..f5915c2c9 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -82,6 +82,7 @@ import numpy as np import pandas as pd import pypsa +from add_electricity import load_costs from _helpers 
import configure_logging from pypsa.descriptors import get_switchable_as_dense as get_as_dense from pypsa.linopf import ( define_constraints, @@ -346,6 +347,25 @@ def add_battery_constraints(n): define_constraints(n, lhs, "=", 0, "Link", "charger_ratio") +def add_bicharger_constraints(n, costs): + tech_type = costs.technology_type + carrier_bicharger = list(costs.loc[(tech_type == "bicharger"), "carrier"].unique()) + for c in carrier_bicharger: + nodes = n.buses.index[n.buses.carrier == c] + if nodes.empty or ("Link", "p_nom") not in n.variables.index: + return + link_p_nom = get_var(n, "Link", "p_nom") + lhs = linexpr( + (1, link_p_nom[nodes + " charger"]), + ( + -n.links.loc[nodes + " discharger", "efficiency"].values, + link_p_nom[nodes + " discharger"].values, + ), + ) + constraint_name = f"charger_ratio_{c}" + define_constraints(n, lhs, "=", 0, "Link", constraint_name) + + def extra_functionality(n, snapshots): """ Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``. 
@@ -354,6 +374,13 @@ def extra_functionality(n, snapshots): """ opts = n.opts config = n.config + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + snakemake.input.tech_costs, + config["costs"], + config["electricity"], + Nyears, + ) if "BAU" in opts and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) if "SAFE" in opts and n.generators.p_nom_extendable.any(): @@ -367,6 +394,7 @@ def extra_functionality(n, snapshots): if "EQ" in o: add_EQ_constraints(n, o) add_battery_constraints(n) + add_bicharger_constraints(n, costs) def solve_network(n, config, opts="", **kwargs): @@ -411,9 +439,9 @@ def solve_network(n, config, opts="", **kwargs): snakemake = mock_snakemake( "solve_network", simpl="", - clusters="54", + clusters="6", ll="copt", - opts="Co2L-1H", + opts="Co2L-4H", ) configure_logging(snakemake) From e1235ddae32516ee99569c27c1945c31c531ab3a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Jan 2023 00:00:16 +0000 Subject: [PATCH 11/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/solve_network.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index f5915c2c9..bc4114a6d 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -82,8 +82,8 @@ import numpy as np import pandas as pd import pypsa -from add_electricity import load_costs from _helpers import configure_logging +from add_electricity import load_costs from pypsa.descriptors import get_switchable_as_dense as get_as_dense from pypsa.linopf import ( define_constraints, @@ -415,7 +415,7 @@ def solve_network(n, config, opts="", **kwargs): solver_name=solver_name, solver_options=solver_options, extra_functionality=extra_functionality, - **kwargs + **kwargs, ) else: ilopf( @@ -426,7 +426,7 @@ def solve_network(n, config, opts="", 
**kwargs): min_iterations=min_iterations, max_iterations=max_iterations, extra_functionality=extra_functionality, - **kwargs + **kwargs, ) return n From e7aa91db3e1713b145e6ce249d04e181b1de1a09 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 19 Jan 2023 23:37:19 +0000 Subject: [PATCH 12/24] add fixes --- scripts/add_electricity.py | 3 ++- scripts/add_extra_components.py | 6 +++--- scripts/solve_network.py | 6 ++++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 374a620db..786fad98c 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -161,6 +161,7 @@ def load_costs(tech_costs, config, elec_config, Nyears=1): costs = costs.value.unstack().fillna(config["fill_values"]) + # set marginal cost to all components generators, charger, discharger, store, ... costs["capital_cost"] = ( ( calculate_annuity(costs["lifetime"], costs["discount rate"]) @@ -219,7 +220,7 @@ def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0): storage_meta_dict, storage_techs = nested_storage_dict(tech_costs) costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs) - # add capital_cost to all storage units indexed by carrier e.g. "lead" or "concrete" + # add capital_cost to all storage_units indexed by carrier e.g. 
"lead" or "concrete" for c in costs.loc[storage_techs, "carrier"].unique(): carrier = costs.carrier tech_type = costs.technology_type diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index db349f0b8..aa889d652 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -266,9 +266,9 @@ def attach_stores(n, costs, config): costs.loc[discharger_or_bicharger_filter].technology_type.item() == "bicharger" ): - none_or_value = None + zero_or_value = 0 else: - none_or_value = float( + zero_or_value = float( costs.loc[discharger_or_bicharger_filter, "capital_cost"] ) n.madd( @@ -279,7 +279,7 @@ def attach_stores(n, costs, config): carrier=f"{c} discharger", p_nom_extendable=True, efficiency=float(costs.loc[discharger_or_bicharger_filter, "efficiency"]), - capital_cost=none_or_value, + capital_cost=zero_or_value, marginal_cost=float( costs.loc[discharger_or_bicharger_filter, "marginal_cost"] ), diff --git a/scripts/solve_network.py b/scripts/solve_network.py index f5915c2c9..cf1bdae8b 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -349,8 +349,10 @@ def add_battery_constraints(n): def add_bicharger_constraints(n, costs): tech_type = costs.technology_type - carrier_bicharger = list(costs.loc[(tech_type == "bicharger"), "carrier"].unique()) - for c in carrier_bicharger: + carriers_bicharger = list(costs.loc[(tech_type == "bicharger"), "carrier"].unique()) + carriers_config = n.config["electricity"]["extendable_carriers"]["Store"] + carriers_intersect = list(set(carriers_bicharger) & set(carriers_config)) + for c in carriers_intersect: nodes = n.buses.index[n.buses.carrier == c] if nodes.empty or ("Link", "p_nom") not in n.variables.index: return From bedee1c236def955d95c9d11dd60cb35241c1958 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 24 Jan 2023 23:18:41 +0000 Subject: [PATCH 13/24] add workflow structure --- Snakefile | 101 ++++++++++++++++++++++++++++++++++--------- config.default.yaml 
| 5 +++ config.tutorial.yaml | 5 +++ 3 files changed, 91 insertions(+), 20 deletions(-) diff --git a/Snakefile b/Snakefile index 293f15b38..ae23c67b7 100644 --- a/Snakefile +++ b/Snakefile @@ -633,44 +633,77 @@ def memory(w): return int(factor * (10000 + 195 * int(w.clusters))) -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False: +if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: - rule solve_network: + rule monte_carlo: input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + output: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", + log: + "logs/" + + RDIR + + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.log", + benchmark: + ( + "benchmarks/" + + RDIR + + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}" + ) + threads: 1 + resources: + mem_mb=4000, + script: + "scripts/monte_carlo.py" + + + rule solve_network: + input: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", tech_costs=COSTS, output: - "results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + "results/" + + RDIR + + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", log: solver=normpath( "logs/" + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_solver.log" ), python="logs/" + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_python.log", memory="logs/" + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_memory.log", benchmark: ( "benchmarks/" + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}" ) threads: 20 resources: - mem=memory, + mem_mb=memory, shadow: 
"shallow" script: "scripts/solve_network.py" -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: + rule solve_all_networks_monte: + input: + expand( + "results/" + + RDIR + + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", + **config["scenario"] + ), - rule monte_carlo: +elif config["technology_assessment"]["options"].get("add_to_snakefile", False) == True: + + rule prepare_technology_assessment: input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", output: @@ -689,16 +722,8 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: resources: mem_mb=4000, script: - "scripts/monte_carlo.py" + "scripts/prepare_technology_assessment.py" - rule solve_monte: - input: - expand( - "networks/" - + RDIR - + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - **config["scenario"] - ), rule solve_network: input: @@ -734,7 +759,8 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: script: "scripts/solve_network.py" - rule solve_all_networks_monte: + + rule solve_all_networks_tech_assessment: input: expand( "results/" @@ -744,6 +770,41 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: ), +else: + + rule solve_network: + input: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + tech_costs=COSTS, + output: + "results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + log: + solver=normpath( + "logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + ), + python="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + memory="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", + benchmark: + ( + "benchmarks/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + ) + threads: 20 + resources: + mem=memory, + shadow: + "shallow" + script: + "scripts/solve_network.py" + + def 
input_make_summary(w): # It's mildly hacky to include the separate costs input as first entry if w.ll.endswith("all"): diff --git a/config.default.yaml b/config.default.yaml index d357bae15..a0aef10f3 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -311,6 +311,11 @@ costs: co2: 0. +technology_assessment: + options: + add_to_snakefile: True + + monte_carlo: options: add_to_snakefile: false diff --git a/config.tutorial.yaml b/config.tutorial.yaml index b21a1e6d9..5bf7ef349 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -307,6 +307,11 @@ costs: co2: 0. +technology_assessment: + options: + add_to_snakefile: True + + monte_carlo: options: add_to_snakefile: false From b4d10325680dc5a6c8f5e083d675f20901054630 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Wed, 25 Jan 2023 14:12:51 +0000 Subject: [PATCH 14/24] implement feature assessment --- Snakefile | 12 ++- config.default.yaml | 8 +- config.tutorial.yaml | 7 +- scripts/prepare_technology_assessment.py | 102 +++++++++++++++++++++++ 4 files changed, 124 insertions(+), 5 deletions(-) create mode 100644 scripts/prepare_technology_assessment.py diff --git a/Snakefile b/Snakefile index ae23c67b7..ab69b5174 100644 --- a/Snakefile +++ b/Snakefile @@ -33,9 +33,15 @@ config["countries"] = create_country_list(config["countries"]) # create a list of iteration steps, required to solve the experimental design # each value is used as wildcard input e.g. 
solution_{unc} -config["scenario"]["unc"] = [ - f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"]) -] +if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: + config["scenario"]["unc"] = [ + f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"]) + ] + +if config["technology_assessment"]["options"].get("add_to_snakefile", False) == True: + config["scenario"]["unc"] = [ + f"f{i}" for i in range(len(config["electricity"]["extendable_carriers"]["Store"])) + ] run = config.get("run", {}) RDIR = run["name"] + "/" if run.get("name") else "" diff --git a/config.default.yaml b/config.default.yaml index a0aef10f3..7703fedf2 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -313,7 +313,13 @@ costs: technology_assessment: options: - add_to_snakefile: True + add_to_snakefile: false + assessment_strategy: "any-chance" + pypsa_standard: + # deviation from BAU + # only store components are tested + stores.capital_cost: [0.8, 1.2] + monte_carlo: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 5bf7ef349..3b2fb7115 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -309,7 +309,12 @@ costs: technology_assessment: options: - add_to_snakefile: True + add_to_snakefile: false + assessment_strategy: "any-chance" + pypsa_standard: + # deviation from BAU + # only store components are tested + stores.capital_cost: [0.8, 1.2] monte_carlo: diff --git a/scripts/prepare_technology_assessment.py b/scripts/prepare_technology_assessment.py new file mode 100644 index 000000000..65e2dbf65 --- /dev/null +++ b/scripts/prepare_technology_assessment.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2022 The PyPSA-Earth Authors +# +# SPDX-License-Identifier: GPL-3.0-or-later +# coding: utf-8 +""" + +""" +import logging +import os +import numpy as np +import pandas as pd +import pypsa +from _helpers import configure_logging + +logger = logging.getLogger(__name__) + + +def 
single_best_in_worst_list(worst_list, best_list): + """ + Return list with single best value per list of worst values + + Input: + ------ + worst_list: 1D array + best_list: 1D array + + Output: + ------- + new_list: 2D array + + Example: + -------- + >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4]) + [[4, 1, 1], [1, 4, 1], [1, 1, 4]] + """ + new_list = [] + for i in range(len(worst_list)): + l = worst_list.copy() + l[i] = best_list[i] + new_list.append(l) + + return new_list + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + os.chdir(os.path.dirname(os.path.abspath(__file__))) + snakemake = mock_snakemake( + "prepare_technology_assessment", + simpl="", + clusters="6", + ll="copt", + opts="Co2L-4H", + unc="f0", + ) + configure_logging(snakemake) + CONFIG = snakemake.config + n = pypsa.Network(snakemake.input[0]) + + ### SCENARIO INPUTS + ### + PYPSA_FEATURES = { + k: v for k, v in CONFIG["technology_assessment"]["pypsa_standard"].items() if v + } # removes key value pairs with empty value e.g. [] + L_BOUNDS = [item[0] for item in PYPSA_FEATURES.values()] + U_BOUNDS = [item[1] for item in PYPSA_FEATURES.values()] + STRATEGY = CONFIG["technology_assessment"]["options"].get("assessment_strategy") + + + if STRATEGY=="any-chance": + carrier_no = len(n.stores.carrier.unique()) + worst_list = L_BOUNDS * carrier_no + best_list = U_BOUNDS * carrier_no + scenarios = single_best_in_worst_list(worst_list, best_list) + + ### NETWORK CHANGES + ### + unc_wildcards = snakemake.wildcards[-1] + i = int(unc_wildcards[1:]) + for k, _ in PYPSA_FEATURES.items(): + type = k.split(".")[0] # "stores", "generators", ... + feature = k.split(".")[1] # "capital_cost", "efficiency", ... 
+ + if type == "stores": + carrier_list = n.stores.carrier.unique() + j = 0 + for c in carrier_list: + n.stores.loc[n.stores.carrier==c, feature] *= scenarios[i][j] + n.links.loc[n.links.carrier.str.contains("H2"), feature] *= scenarios[i][j] + logger.info(f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario") + j += 1 + + ### EXPORT AND METADATA + # + scenario_dict = ( + pd.DataFrame(scenarios).rename_axis("iteration").add_suffix("_carrier") + ).to_dict() + n.meta.update(scenario_dict) + n.export_to_netcdf(snakemake.output[0]) From 04366569226066002da48b051aeb8e6617f6738b Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 26 Jan 2023 15:22:46 +0000 Subject: [PATCH 15/24] add changes and merge monte-carlo scripts --- Snakefile | 87 +----------- config.default.yaml | 25 ++-- config.tutorial.yaml | 24 ++-- doc/configtables/monte-carlo.csv | 16 ++- scripts/monte_carlo.py | 168 +++++++++++++++-------- scripts/prepare_technology_assessment.py | 102 -------------- test/config.monte_carlo.yaml | 3 +- 7 files changed, 147 insertions(+), 278 deletions(-) delete mode 100644 scripts/prepare_technology_assessment.py diff --git a/Snakefile b/Snakefile index ab69b5174..c109d0553 100644 --- a/Snakefile +++ b/Snakefile @@ -14,6 +14,7 @@ from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider from scripts._helpers import create_country_list from scripts.add_electricity import get_load_paths_gegis from scripts.retrieve_databundle_light import datafiles_retrivedatabundle +from scripts.monte_carlo import wildcard_creator HTTP = HTTPRemoteProvider() @@ -30,18 +31,12 @@ configfile: "configs/bundle_config.yaml" # convert country list according to the desired region config["countries"] = create_country_list(config["countries"]) - # create a list of iteration steps, required to solve the experimental design -# each value is used as wildcard input e.g. 
solution_{unc} -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: - config["scenario"]["unc"] = [ - f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"]) - ] - -if config["technology_assessment"]["options"].get("add_to_snakefile", False) == True: - config["scenario"]["unc"] = [ - f"f{i}" for i in range(len(config["electricity"]["extendable_carriers"]["Store"])) - ] +# each value is used as wildcard input e.g. solution_{unc} +if config["monte_carlo"].get("add_to_snakefile", False) == True: + config["scenario"]["unc"] = wildcard_creator( + config, config["monte_carlo"]["options"].get("method") + ) run = config.get("run", {}) RDIR = run["name"] + "/" if run.get("name") else "" @@ -639,7 +634,7 @@ def memory(w): return int(factor * (10000 + 195 * int(w.clusters))) -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: +if config["monte_carlo"].get("add_to_snakefile", False) == True: rule monte_carlo: input: @@ -707,74 +702,6 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: **config["scenario"] ), -elif config["technology_assessment"]["options"].get("add_to_snakefile", False) == True: - - rule prepare_technology_assessment: - input: - "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - output: - "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - log: - "logs/" - + RDIR - + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.log", - benchmark: - ( - "benchmarks/" - + RDIR - + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}" - ) - threads: 1 - resources: - mem_mb=4000, - script: - "scripts/prepare_technology_assessment.py" - - - rule solve_network: - input: - "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - tech_costs=COSTS, - output: - "results/" - + RDIR - + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - log: - solver=normpath( - "logs/" - + RDIR - + 
"solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_solver.log" - ), - python="logs/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_python.log", - memory="logs/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}_memory.log", - benchmark: - ( - "benchmarks/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}" - ) - threads: 20 - resources: - mem_mb=memory, - shadow: - "shallow" - script: - "scripts/solve_network.py" - - - rule solve_all_networks_tech_assessment: - input: - expand( - "results/" - + RDIR - + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - **config["scenario"] - ), - else: diff --git a/config.default.yaml b/config.default.yaml index 7703fedf2..025b05fda 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -311,30 +311,21 @@ costs: co2: 0. -technology_assessment: - options: - add_to_snakefile: false - assessment_strategy: "any-chance" - pypsa_standard: - # deviation from BAU - # only store components are tested - stores.capital_cost: [0.8, 1.2] - - - monte_carlo: + add_to_snakefile: True options: - add_to_snakefile: false - samples: 7 # number of optimizations - sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported + method: "global_sensitivity" # or any_chance_store_test + samples: 7 # number of optimizations + sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: # User can add here flexibly more features for the Monte-Carlo sampling. # Given as "key: value" format # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering! 
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object - loads_t.p_set: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1] + # Examples: + # loads_t.p_set: [0.9, 1.1] + # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] + stores.capital_cost: [0.8, 1.2] # for any_chance_store_test solving: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 3b2fb7115..9b22c9c48 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -307,29 +307,21 @@ costs: co2: 0. -technology_assessment: - options: - add_to_snakefile: false - assessment_strategy: "any-chance" - pypsa_standard: - # deviation from BAU - # only store components are tested - stores.capital_cost: [0.8, 1.2] - - monte_carlo: + add_to_snakefile: True options: - add_to_snakefile: false - samples: 7 # number of optimizations - sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported + method: "global_sensitivity" # or any_chance_store_test + samples: 7 # number of optimizations + sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: # User can add here flexibly more features for the Monte-Carlo sampling. # Given as "key: value" format # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering! 
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object - loads_t.p_set: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1] + # Examples: + # loads_t.p_set: [0.9, 1.1] + # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] + stores.capital_cost: [0.8, 1.2] # for any_chance_store_test solving: diff --git a/doc/configtables/monte-carlo.csv b/doc/configtables/monte-carlo.csv index efde31bd0..c641bd93e 100644 --- a/doc/configtables/monte-carlo.csv +++ b/doc/configtables/monte-carlo.csv @@ -1,7 +1,9 @@ -,Unit,Values,Description -options,,, --- add_to_snakemake,,true or false,Add rule to Snakefile or not --- samples,,"int** read description!", "Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number, that is not a quadrat of a prime, needs to be chosen. E.g. 7 not 9 (3^2)" --- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design -pypsa_standard,,, --- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object." 
+,Unit,Values,Description,,, +add_to_snakemake,,true or false,Add rule to Snakefile or not,,, +options,,,,,, +-- method,,"""global_sensitivity"" or ""any_chance_store_test""","""global_sensitivity"" create scenarios in latin hypercube space, or ""any_chance_store_test"" create scenarios where all values are pessimistic and only one is optimistic",,, +-- samples,,integer," Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts) then a prime number that is not a quadrat of a prime needs to be chosen. E.g. 7 not 9 (3^2)""",,, +-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design,,, +pypsa_standard,,,,,, +-- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object. Method ""any_chance_store_test"" only supports e.g. n.stores = [0.7,1.3]",,, +,,,,,, \ No newline at end of file diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 1059845ed..3e0c1bff8 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -77,6 +77,17 @@ logger = logging.getLogger(__name__) +def wildcard_creator(config, method=None): + """ + Creates wildcard for monte-carlo simulations. 
+ """ + if method=="global_sensitivity": + return [f"g{i}" for i in range(config["monte_carlo"]["options"]["samples"])] + + elif method=="any_chance_store_test": + return [f"a{i}" for i in range(len(config["electricity"]["extendable_carriers"]["Store"]))] + + def monte_carlo_sampling_pydoe2( N_FEATURES, SAMPLES, @@ -158,6 +169,33 @@ def monte_carlo_sampling_scipy( return lh +def single_best_in_worst_list(worst_list, best_list): + """ + Return list with single best value per list of worst values + + Input: + ------ + worst_list: 1D array + best_list: 1D array + + Output: + ------- + new_list: 2D array + + Example: + -------- + >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4]) + [[4, 1, 1], [1, 4, 1], [1, 1, 4]] + """ + new_list = [] + for i in range(len(worst_list)): + l = worst_list.copy() + l[i] = best_list[i] + new_list.append(l) + + return new_list + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -173,72 +211,94 @@ def monte_carlo_sampling_scipy( ) configure_logging(snakemake) config = snakemake.config + n = pypsa.Network(snakemake.input[0]) - ### SCENARIO INPUTS - ### - MONTE_CARLO_PYPSA_FEATURES = { + PYPSA_FEATURES = { k: v for k, v in config["monte_carlo"]["pypsa_standard"].items() if v } # removes key value pairs with empty value e.g. [] - MONTE_CARLO_OPTIONS = config["monte_carlo"]["options"] - L_BOUNDS = [item[0] for item in MONTE_CARLO_PYPSA_FEATURES.values()] - U_BOUNDS = [item[1] for item in MONTE_CARLO_PYPSA_FEATURES.values()] + OPTIONS = config["monte_carlo"]["options"] + L_BOUNDS = [item[0] for item in PYPSA_FEATURES.values()] + U_BOUNDS = [item[1] for item in PYPSA_FEATURES.values()] N_FEATURES = len( - MONTE_CARLO_PYPSA_FEATURES + PYPSA_FEATURES ) # only counts features when specified in config - SAMPLES = MONTE_CARLO_OPTIONS.get( - "samples" - ) # TODO: What is the optimal sampling? 
Fabian Neumann answered that in "Broad ranges" paper - SAMPLING_STRATEGY = MONTE_CARLO_OPTIONS.get("sampling_strategy") - ### SCENARIO CREATION / SAMPLING STRATEGY + ### SCENARIO CREATION ### - if SAMPLING_STRATEGY == "pydoe2": - lh = monte_carlo_sampling_pydoe2( - N_FEATURES, - SAMPLES, - criterion=None, - iteration=None, - random_state=42, - correlation_matrix=None, - ) - if SAMPLING_STRATEGY == "scipy": - lh = monte_carlo_sampling_scipy( - N_FEATURES, - SAMPLES, - centered=False, - strength=2, - optimization=None, - seed=42, - ) - if SAMPLING_STRATEGY == "chaospy": - lh = monte_carlo_sampling_chaospy( - N_FEATURES, - SAMPLES, - rule="latin_hypercube", - seed=42, - ) - lh_scaled = qmc.scale(lh, L_BOUNDS, U_BOUNDS) - - ### MONTE-CARLO MODIFICATIONS + if OPTIONS.get("method")=="global_sensitivity": + SAMPLES = OPTIONS.get( + "samples" + ) # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper + SAMPLING_STRATEGY = OPTIONS.get("sampling_strategy") + + if SAMPLING_STRATEGY == "pydoe2": + lh = monte_carlo_sampling_pydoe2( + N_FEATURES, + SAMPLES, + criterion=None, + iteration=None, + random_state=42, + correlation_matrix=None, + ) + if SAMPLING_STRATEGY == "scipy": + lh = monte_carlo_sampling_scipy( + N_FEATURES, + SAMPLES, + centered=False, + strength=2, + optimization=None, + seed=42, + ) + if SAMPLING_STRATEGY == "chaospy": + lh = monte_carlo_sampling_chaospy( + N_FEATURES, + SAMPLES, + rule="latin_hypercube", + seed=42, + ) + scenarios = qmc.scale(lh, L_BOUNDS, U_BOUNDS) + + elif OPTIONS.get("method")=="any_chance_store_test": + carrier_no = len(n.stores.carrier.unique()) + worst_list = L_BOUNDS * carrier_no + best_list = U_BOUNDS * carrier_no + scenarios = single_best_in_worst_list(worst_list, best_list) + + ### NETWORK MODIFICATION ### - n = pypsa.Network(snakemake.input[0]) unc_wildcards = snakemake.wildcards[-1] i = int(unc_wildcards[1:]) j = 0 - for k, v in MONTE_CARLO_PYPSA_FEATURES.items(): - # this loop sets in one 
scenario each "i" feature assumption - # k is the config input key "loads_t.p_set" - # v is the lower and upper bound [0.8,1.3], that was used for lh_scaled - # i, j interation number to pick values of experimental setup - # Example: n.loads_t.p_set = network.loads_t.p_set = .loads_t.p_set * lh_scaled[0,0] - exec(f"n.{k} = n.{k} * {lh_scaled[i,j]}") - logger.info(f"Scaled n.{k} by factor {lh_scaled[i,j]} in the {i} scenario") - j = j + 1 + + if OPTIONS.get("method")=="global_sensitivity": + for k, v in PYPSA_FEATURES.items(): + # this loop sets in one scenario each "i" feature assumption + # k is the config input key "loads_t.p_set" + # v is the lower and upper bound [0.8,1.3], that was used for scenarios + # i, j interation number to pick values of experimental setup + # Example: n.loads_t.p_set = n.loads_t.p_set * scenarios[0,0] + exec(f"n.{k} = n.{k} * {scenarios[i,j]}") + logger.info(f"Scaled n.{k} by factor {scenarios[i,j]} in the {i} scenario") + j = j + 1 + + if OPTIONS.get("method")=="any_chance_store_test": + for k, _ in PYPSA_FEATURES.items(): + type = k.split(".")[0] # "stores", "generators", ... + feature = k.split(".")[1] # "capital_cost", "efficiency", ... 
+ + if type == "stores": + # scales the whole storage-chain + carrier_list = n.stores.carrier.unique() + for c in carrier_list: + n.stores.loc[n.stores.carrier==c, feature] *= scenarios[i][j] + n.links.loc[n.links.carrier.str.contains("H2"), feature] *= scenarios[i][j] + logger.info(f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario") + j += 1 ### EXPORT AND METADATA - # - latin_hypercube_dict = ( - pd.DataFrame(lh_scaled).rename_axis("Nruns").add_suffix("_feature") + ### + scenario_dict = ( + pd.DataFrame(scenarios).rename_axis("iteration").add_suffix("_feature") ).to_dict() - n.meta.update(latin_hypercube_dict) + n.meta.update(scenario_dict) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_technology_assessment.py b/scripts/prepare_technology_assessment.py deleted file mode 100644 index 65e2dbf65..000000000 --- a/scripts/prepare_technology_assessment.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2022 The PyPSA-Earth Authors -# -# SPDX-License-Identifier: GPL-3.0-or-later -# coding: utf-8 -""" - -""" -import logging -import os -import numpy as np -import pandas as pd -import pypsa -from _helpers import configure_logging - -logger = logging.getLogger(__name__) - - -def single_best_in_worst_list(worst_list, best_list): - """ - Return list with single best value per list of worst values - - Input: - ------ - worst_list: 1D array - best_list: 1D array - - Output: - ------- - new_list: 2D array - - Example: - -------- - >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4]) - [[4, 1, 1], [1, 4, 1], [1, 1, 4]] - """ - new_list = [] - for i in range(len(worst_list)): - l = worst_list.copy() - l[i] = best_list[i] - new_list.append(l) - - return new_list - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - os.chdir(os.path.dirname(os.path.abspath(__file__))) - snakemake = mock_snakemake( - 
"prepare_technology_assessment", - simpl="", - clusters="6", - ll="copt", - opts="Co2L-4H", - unc="f0", - ) - configure_logging(snakemake) - CONFIG = snakemake.config - n = pypsa.Network(snakemake.input[0]) - - ### SCENARIO INPUTS - ### - PYPSA_FEATURES = { - k: v for k, v in CONFIG["technology_assessment"]["pypsa_standard"].items() if v - } # removes key value pairs with empty value e.g. [] - L_BOUNDS = [item[0] for item in PYPSA_FEATURES.values()] - U_BOUNDS = [item[1] for item in PYPSA_FEATURES.values()] - STRATEGY = CONFIG["technology_assessment"]["options"].get("assessment_strategy") - - - if STRATEGY=="any-chance": - carrier_no = len(n.stores.carrier.unique()) - worst_list = L_BOUNDS * carrier_no - best_list = U_BOUNDS * carrier_no - scenarios = single_best_in_worst_list(worst_list, best_list) - - ### NETWORK CHANGES - ### - unc_wildcards = snakemake.wildcards[-1] - i = int(unc_wildcards[1:]) - for k, _ in PYPSA_FEATURES.items(): - type = k.split(".")[0] # "stores", "generators", ... - feature = k.split(".")[1] # "capital_cost", "efficiency", ... 
- - if type == "stores": - carrier_list = n.stores.carrier.unique() - j = 0 - for c in carrier_list: - n.stores.loc[n.stores.carrier==c, feature] *= scenarios[i][j] - n.links.loc[n.links.carrier.str.contains("H2"), feature] *= scenarios[i][j] - logger.info(f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario") - j += 1 - - ### EXPORT AND METADATA - # - scenario_dict = ( - pd.DataFrame(scenarios).rename_axis("iteration").add_suffix("_carrier") - ).to_dict() - n.meta.update(scenario_dict) - n.export_to_netcdf(snakemake.output[0]) diff --git a/test/config.monte_carlo.yaml b/test/config.monte_carlo.yaml index 034733fb4..804a0b207 100644 --- a/test/config.monte_carlo.yaml +++ b/test/config.monte_carlo.yaml @@ -7,5 +7,4 @@ retrieve_databundle: # required to be "false" for nice CI test output show_progress: false monte_carlo: - options: - add_to_snakefile: true + add_to_snakefile: true From 897ed026c69201f5b683a36775db040894672f4b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 26 Jan 2023 17:04:43 +0000 Subject: [PATCH 16/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Snakefile | 5 +--- config.default.yaml | 2 +- config.tutorial.yaml | 2 +- doc/configtables/monte-carlo.csv | 2 +- scripts/monte_carlo.py | 47 ++++++++++++++++++-------------- 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/Snakefile b/Snakefile index 0a9e06d82..535b1847f 100644 --- a/Snakefile +++ b/Snakefile @@ -32,7 +32,7 @@ configfile: "configs/bundle_config.yaml" config["countries"] = create_country_list(config["countries"]) # create a list of iteration steps, required to solve the experimental design -# each value is used as wildcard input e.g. solution_{unc} +# each value is used as wildcard input e.g. 
solution_{unc} if config["monte_carlo"].get("add_to_snakefile", False) == True: config["scenario"]["unc"] = wildcard_creator( config, config["monte_carlo"]["options"].get("method") @@ -657,7 +657,6 @@ if config["monte_carlo"].get("add_to_snakefile", False) == True: script: "scripts/monte_carlo.py" - rule solve_network: input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", @@ -692,7 +691,6 @@ if config["monte_carlo"].get("add_to_snakefile", False) == True: script: "scripts/solve_network.py" - rule solve_all_networks_monte: input: expand( @@ -702,7 +700,6 @@ if config["monte_carlo"].get("add_to_snakefile", False) == True: **config["scenario"] ), - else: rule solve_network: diff --git a/config.default.yaml b/config.default.yaml index 025b05fda..5ac58358f 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -312,7 +312,7 @@ costs: monte_carlo: - add_to_snakefile: True + add_to_snakefile: true options: method: "global_sensitivity" # or any_chance_store_test samples: 7 # number of optimizations diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 9b22c9c48..cc50abfb3 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -308,7 +308,7 @@ costs: monte_carlo: - add_to_snakefile: True + add_to_snakefile: true options: method: "global_sensitivity" # or any_chance_store_test samples: 7 # number of optimizations diff --git a/doc/configtables/monte-carlo.csv b/doc/configtables/monte-carlo.csv index c641bd93e..689adaf2c 100644 --- a/doc/configtables/monte-carlo.csv +++ b/doc/configtables/monte-carlo.csv @@ -6,4 +6,4 @@ options,,,,,, -- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design,,, pypsa_standard,,,,,, -- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. 
wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object. Method ""any_chance_store_test"" only supports e.g. n.stores = [0.7,1.3]",,, -,,,,,, \ No newline at end of file +,,,,,, diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 3e0c1bff8..5477048d3 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -81,11 +81,14 @@ def wildcard_creator(config, method=None): """ Creates wildcard for monte-carlo simulations. """ - if method=="global_sensitivity": + if method == "global_sensitivity": return [f"g{i}" for i in range(config["monte_carlo"]["options"]["samples"])] - - elif method=="any_chance_store_test": - return [f"a{i}" for i in range(len(config["electricity"]["extendable_carriers"]["Store"]))] + + elif method == "any_chance_store_test": + return [ + f"a{i}" + for i in range(len(config["electricity"]["extendable_carriers"]["Store"])) + ] def monte_carlo_sampling_pydoe2( @@ -172,16 +175,16 @@ def monte_carlo_sampling_scipy( def single_best_in_worst_list(worst_list, best_list): """ Return list with single best value per list of worst values - + Input: ------ worst_list: 1D array best_list: 1D array - + Output: ------- new_list: 2D array - + Example: -------- >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4]) @@ -192,7 +195,7 @@ def single_best_in_worst_list(worst_list, best_list): l = worst_list.copy() l[i] = best_list[i] new_list.append(l) - + return new_list @@ -219,16 +222,14 @@ def single_best_in_worst_list(worst_list, best_list): OPTIONS = config["monte_carlo"]["options"] L_BOUNDS = [item[0] for item in PYPSA_FEATURES.values()] U_BOUNDS = [item[1] for item in PYPSA_FEATURES.values()] - N_FEATURES = len( - PYPSA_FEATURES - ) # only counts features when specified in config + N_FEATURES = len(PYPSA_FEATURES) # only counts features when specified in config ### SCENARIO CREATION ### - if OPTIONS.get("method")=="global_sensitivity": + if 
OPTIONS.get("method") == "global_sensitivity": SAMPLES = OPTIONS.get( "samples" - ) # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper + ) # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper SAMPLING_STRATEGY = OPTIONS.get("sampling_strategy") if SAMPLING_STRATEGY == "pydoe2": @@ -258,7 +259,7 @@ def single_best_in_worst_list(worst_list, best_list): ) scenarios = qmc.scale(lh, L_BOUNDS, U_BOUNDS) - elif OPTIONS.get("method")=="any_chance_store_test": + elif OPTIONS.get("method") == "any_chance_store_test": carrier_no = len(n.stores.carrier.unique()) worst_list = L_BOUNDS * carrier_no best_list = U_BOUNDS * carrier_no @@ -270,7 +271,7 @@ def single_best_in_worst_list(worst_list, best_list): i = int(unc_wildcards[1:]) j = 0 - if OPTIONS.get("method")=="global_sensitivity": + if OPTIONS.get("method") == "global_sensitivity": for k, v in PYPSA_FEATURES.items(): # this loop sets in one scenario each "i" feature assumption # k is the config input key "loads_t.p_set" @@ -281,18 +282,22 @@ def single_best_in_worst_list(worst_list, best_list): logger.info(f"Scaled n.{k} by factor {scenarios[i,j]} in the {i} scenario") j = j + 1 - if OPTIONS.get("method")=="any_chance_store_test": + if OPTIONS.get("method") == "any_chance_store_test": for k, _ in PYPSA_FEATURES.items(): - type = k.split(".")[0] # "stores", "generators", ... - feature = k.split(".")[1] # "capital_cost", "efficiency", ... + type = k.split(".")[0] # "stores", "generators", ... + feature = k.split(".")[1] # "capital_cost", "efficiency", ... 
if type == "stores": # scales the whole storage-chain carrier_list = n.stores.carrier.unique() for c in carrier_list: - n.stores.loc[n.stores.carrier==c, feature] *= scenarios[i][j] - n.links.loc[n.links.carrier.str.contains("H2"), feature] *= scenarios[i][j] - logger.info(f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario") + n.stores.loc[n.stores.carrier == c, feature] *= scenarios[i][j] + n.links.loc[n.links.carrier.str.contains("H2"), feature] *= scenarios[ + i + ][j] + logger.info( + f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario" + ) j += 1 ### EXPORT AND METADATA From bdf606db7188e83f99872acc213692467200c03a Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 26 Jan 2023 19:50:54 +0000 Subject: [PATCH 17/24] add reviewed changes --- Snakefile | 4 +--- config.default.yaml | 4 ++-- config.tutorial.yaml | 4 ++-- doc/configtables/monte-carlo.csv | 2 +- scripts/_helpers.py | 13 +++++++++++-- scripts/monte_carlo.py | 7 ++++--- 6 files changed, 21 insertions(+), 13 deletions(-) diff --git a/Snakefile b/Snakefile index 535b1847f..86a0bc478 100644 --- a/Snakefile +++ b/Snakefile @@ -34,9 +34,7 @@ config["countries"] = create_country_list(config["countries"]) # create a list of iteration steps, required to solve the experimental design # each value is used as wildcard input e.g. 
solution_{unc} if config["monte_carlo"].get("add_to_snakefile", False) == True: - config["scenario"]["unc"] = wildcard_creator( - config, config["monte_carlo"]["options"].get("method") - ) + config["scenario"]["unc"] = wildcard_creator(config) run = config.get("run", {}) RDIR = run["name"] + "/" if run.get("name") else "" diff --git a/config.default.yaml b/config.default.yaml index 5ac58358f..50def3c87 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -314,7 +314,7 @@ costs: monte_carlo: add_to_snakefile: true options: - method: "global_sensitivity" # or any_chance_store_test + method: "global_sensitivity" # or single_best_in_worst for technology assessment samples: 7 # number of optimizations sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: @@ -325,7 +325,7 @@ monte_carlo: # Examples: # loads_t.p_set: [0.9, 1.1] # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - stores.capital_cost: [0.8, 1.2] # for any_chance_store_test + stores.capital_cost: [0.8, 1.2] # for single_best_in_worst solving: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index cc50abfb3..46535d20f 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -310,7 +310,7 @@ costs: monte_carlo: add_to_snakefile: true options: - method: "global_sensitivity" # or any_chance_store_test + method: "global_sensitivity" # or single_best_in_worst for technology assessment samples: 7 # number of optimizations sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: @@ -321,7 +321,7 @@ monte_carlo: # Examples: # loads_t.p_set: [0.9, 1.1] # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - stores.capital_cost: [0.8, 1.2] # for any_chance_store_test + stores.capital_cost: [0.8, 1.2] # for single_best_in_worst solving: diff --git a/doc/configtables/monte-carlo.csv b/doc/configtables/monte-carlo.csv index 689adaf2c..8233302d8 100644 --- 
a/doc/configtables/monte-carlo.csv +++ b/doc/configtables/monte-carlo.csv @@ -1,7 +1,7 @@ ,Unit,Values,Description,,, add_to_snakemake,,true or false,Add rule to Snakefile or not,,, options,,,,,, --- method,,"""global_sensitivity"" or ""any_chance_store_test""","""global_sensitivity"" create scenarios in latin hypercube space, or ""any_chance_store_test"" create scenarios where all values are pessimistic and only one is optimistic""global_sensitivity"" create scenarios in latin hypercube space, or ""any_chance_store_test"" create scenarios where all values are pessimistic and only one is optimistic",,, +-- method,,"""global_sensitivity"" or ""single_best_in_worst""","""global_sensitivity"" create scenarios in latin hypercube space, or ""single_best_in_worst"" create scenarios where all values are pessimistic and only one is optimistic,,, -- samples,,integer," Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts) then a prime number that is not a quadrat of a prime needs to be chosen. E.g. 7 not 9 (3^2)""",,, -- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design,,, pypsa_standard,,,,,, diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 055e81bfa..bb7ddc12a 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -654,14 +654,14 @@ def nested_storage_dict(tech_costs): storage_dict = ast.literal_eval( df.iloc[i, 0] ) # https://stackoverflow.com/a/988251/13573820 - _ = storage_dict.pop("note", None) + storage_dict.pop("note", None) nested_storage_dict[df.index[i]] = storage_dict return [nested_storage_dict, storage_techs] def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs): """ - Add storage specific columns to costs.csv. + Add storage specific columns e.g. 
"carrier", "type", "technology_type" to costs.csv Input: ------ @@ -672,6 +672,15 @@ def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs): Output: ------- costs: pd.DataFrame, costs.csv with added storage specific columns + + Example: + -------- + From the nested dictionary: + { + 'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} + ... + } + The columns "carrier", "type", "technology_type" will be added to costs.csv """ # add storage specific columns to costs.csv for c in ["carrier", "technology_type", "type"]: diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 5477048d3..b4f95a85f 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -81,10 +81,11 @@ def wildcard_creator(config, method=None): """ Creates wildcard for monte-carlo simulations. """ + method = config["monte_carlo"]["options"].get("method") if method == "global_sensitivity": return [f"g{i}" for i in range(config["monte_carlo"]["options"]["samples"])] - elif method == "any_chance_store_test": + elif method == "single_best_in_worst": return [ f"a{i}" for i in range(len(config["electricity"]["extendable_carriers"]["Store"])) @@ -259,7 +260,7 @@ def single_best_in_worst_list(worst_list, best_list): ) scenarios = qmc.scale(lh, L_BOUNDS, U_BOUNDS) - elif OPTIONS.get("method") == "any_chance_store_test": + elif OPTIONS.get("method") == "single_best_in_worst": carrier_no = len(n.stores.carrier.unique()) worst_list = L_BOUNDS * carrier_no best_list = U_BOUNDS * carrier_no @@ -282,7 +283,7 @@ def single_best_in_worst_list(worst_list, best_list): logger.info(f"Scaled n.{k} by factor {scenarios[i,j]} in the {i} scenario") j = j + 1 - if OPTIONS.get("method") == "any_chance_store_test": + if OPTIONS.get("method") == "single_best_in_worst": for k, _ in PYPSA_FEATURES.items(): type = k.split(".")[0] # "stores", "generators", ... feature = k.split(".")[1] # "capital_cost", "efficiency", ... 
From 72eda7910f649ad06cbe8f52f1faa199a4cb8c31 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 26 Jan 2023 21:12:42 +0000 Subject: [PATCH 18/24] add docstring --- scripts/_helpers.py | 7 +++++++ scripts/add_extra_components.py | 10 +++++++--- scripts/monte_carlo.py | 25 ++++++++++++++++++++----- 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/scripts/_helpers.py b/scripts/_helpers.py index bb7ddc12a..1cc747aad 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -614,6 +614,13 @@ def nested_storage_dict(tech_costs): """ Create a nested dictionary with a storage index and meta data relation. + The costs.csv file from the technology_data interface contains metadata + for storage technologies when the PNNL data extractions is activated PR #67. + The metadata is stored in a csv as a nested dictionary value which is + read out by this function and converted to a nested dictionary for further + use. One example use the metadata enables is the automatic energy storage + creation in the model from the config.yaml. 
+ Input: ------ tech_costs: str, path to technology costs.csv file diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 6710e3765..d972e2c7a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -83,6 +83,7 @@ def attach_storageunits(n, costs, config): lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} + # TODO: add metadata to H2 and battery to only read in over database interface for carrier in carriers_classic: roundtrip_correction = 0.5 if carrier == "battery" else 1 n.madd( @@ -102,6 +103,7 @@ def attach_storageunits(n, costs, config): cyclic_state_of_charge=True, ) + # automated attach of storage_units from database for carrier in carriers_database: tech_type = costs.technology_type charger_or_bicharger_filter = (costs.carrier == carrier) & ( @@ -146,9 +148,7 @@ def attach_stores(n, costs, config): buses_i = n.buses.index bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} - ### TODO: GENERAL STORE ATTACH FUNCTION - # Combine bicharger model & traditional model? (or even more general e.g. 
5 step charger) - + # TODO: add metadata to H2 and battery to only read in over database interface if "H2" in carriers_classic: h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) @@ -227,14 +227,18 @@ def attach_stores(n, costs, config): marginal_cost=costs.at["battery inverter", "marginal_cost"], ) + # automated attach of store-link storages from database for c in carriers_database: carrier_buses_i = n.madd("Bus", buses_i + f" {c}", carrier=c, **bus_sub_dict) tech_type = costs.technology_type carrier = costs.carrier + # filters for pypsa store store_filter = (carrier == c) & (tech_type == "store") + # filters for pypsa charger or bicharger charger_or_bicharger_filter = (carrier == c) & ( (tech_type == "charger") | (tech_type == "bicharger") ) + # filters for pypsa discharger or bicharger discharger_or_bicharger_filter = (carrier == c) & ( (tech_type == "discharger") | (tech_type == "bicharger") ) diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index b4f95a85f..daa7b7be3 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -177,17 +177,32 @@ def single_best_in_worst_list(worst_list, best_list): """ Return list with single best value per list of worst values + The goal is to create a list of scenarios where each scenario + has one best value and the rest are worst cases. This is useful for + assessing the impact of a single technology on the system. + E.g. if a technology is not optimised in any best case, it is + likely not relevant to include in the optimization or + global sensitivity analysis. + + What is good and what is bad is determined by the user. + See example below for clarification of use cases. 
+ Input: ------ - worst_list: 1D array - best_list: 1D array + worst_list: 1D array, represents the worst value per technology + best_list: 1D array, represents the best value per technology Output: ------- - new_list: 2D array + new_list: 2D array Example: -------- + Let's create a scenario set with 3 technologies (creates 3 scenarios) + # Scenario set when low values are good e.g. costs + >>> single_best_in_worst_list([4, 4, 4], [1, 1, 1]) + [[1, 4, 4], [4, 1, 4], [4, 4, 1]] + # Scenario set when high values are good e.g. efficiency >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4]) [[4, 1, 1], [1, 4, 1], [1, 1, 4]] """ @@ -262,8 +277,8 @@ def single_best_in_worst_list(worst_list, best_list): elif OPTIONS.get("method") == "single_best_in_worst": carrier_no = len(n.stores.carrier.unique()) - worst_list = L_BOUNDS * carrier_no - best_list = U_BOUNDS * carrier_no + worst_list = U_BOUNDS * carrier_no + best_list = L_BOUNDS * carrier_no scenarios = single_best_in_worst_list(worst_list, best_list) ### NETWORK MODIFICATION From c317272efe8b1f094639356cd082e2cae262d703 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 26 Jan 2023 21:15:56 +0000 Subject: [PATCH 19/24] add TODO --- scripts/monte_carlo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index daa7b7be3..24f3f4033 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -303,6 +303,7 @@ def single_best_in_worst_list(worst_list, best_list): type = k.split(".")[0] # "stores", "generators", ... feature = k.split(".")[1] # "capital_cost", "efficiency", ... + #TODO: Generalize for other features. 
Currently this scales the whole storage-chain if type == "stores": # scales the whole storage-chain carrier_list = n.stores.carrier.unique() From 14fe873ba02320a8b4e6b160e729017f52283d11 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 26 Jan 2023 21:17:42 +0000 Subject: [PATCH 20/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/monte_carlo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 24f3f4033..3b7fb6300 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -194,7 +194,7 @@ def single_best_in_worst_list(worst_list, best_list): Output: ------- - new_list: 2D array + new_list: 2D array Example: -------- @@ -303,7 +303,7 @@ def single_best_in_worst_list(worst_list, best_list): type = k.split(".")[0] # "stores", "generators", ... feature = k.split(".")[1] # "capital_cost", "efficiency", ... - #TODO: Generalize for other features. Currently this scales the whole storage-chain + # TODO: Generalize for other features. 
Currently this scales the whole storage-chain if type == "stores": # scales the whole storage-chain carrier_list = n.stores.carrier.unique() From 8e6d6d17e96344b85cba1a7bbb7d6a6264de35b4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 13:49:35 +0000 Subject: [PATCH 21/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/add_extra_components.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index d972e2c7a..211e6e4c9 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -70,7 +70,6 @@ def attach_storageunits(n, costs, config): - carriers = config["electricity"]["extendable_carriers"]["StorageUnit"] carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] carriers_database = [x for x in carriers if x != "H2" and x != "battery"] @@ -292,7 +291,6 @@ def attach_stores(n, costs, config): def attach_hydrogen_pipelines(n, costs, config): - ext_carriers = config["electricity"]["extendable_carriers"] as_stores = ext_carriers.get("Store", []) From 99cf8df70b31b78c65dc272e8824e999a1e2367a Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Thu, 9 Feb 2023 13:24:28 +0000 Subject: [PATCH 22/24] enable monte in scenario management --- Snakefile | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/Snakefile b/Snakefile index a7ccaf194..13358daed 100644 --- a/Snakefile +++ b/Snakefile @@ -843,7 +843,8 @@ rule run_scenario: resources: mem_mb=5000, run: - from scripts.build_test_configs import create_test_config + from scripts.build_test_configs import create_test_config, _parse_inputconfig + from ruamel.yaml import YAML # Ensure the scenario name matches the name of the configuration create_test_config( @@ -853,9 +854,15 @@ rule run_scenario: ) # merge the default config file with the difference 
create_test_config(input.default_config, input.diff_config, "config.yaml") - os.system( - "snakemake -j all solve_all_networks --forceall --rerun-incomplete" - ) + config = _parse_inputconfig("config.yaml", YAML()) + if config["monte_carlo"].get("add_to_snakefile", False) == True: + os.system( + "snakemake -j all solve_all_networks_monte --forceall --rerun-incomplete" + ) + else: + os.system( + "snakemake -j all solve_all_networks --forceall --rerun-incomplete" + ) copyfile("config.yaml", output.copyconfig) From 322652f162cffc20250d397d75633e3c6548b42c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 9 Feb 2023 13:25:34 +0000 Subject: [PATCH 23/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Snakefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Snakefile b/Snakefile index ebd22aa31..ea1ba93ef 100644 --- a/Snakefile +++ b/Snakefile @@ -843,7 +843,10 @@ rule run_scenario: resources: mem_mb=5000, run: - from scripts.build_test_configs import create_test_config, _parse_inputconfig + from scripts.build_test_configs import ( + create_test_config, + _parse_inputconfig, + ) from ruamel.yaml import YAML # Ensure the scenario name matches the name of the configuration @@ -866,6 +869,7 @@ rule run_scenario: copyfile("config.yaml", output.copyconfig) + rule run_all_scenarios: input: expand( From 884036572af26e81ba1b3eb6027107a611bbfc17 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 13:22:08 +0000 Subject: [PATCH 24/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Snakefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Snakefile b/Snakefile index e3e48c2f2..110abd7c8 100644 --- a/Snakefile +++ b/Snakefile @@ -907,6 +907,7 @@ rule run_scenario: copyfile("config.yaml", 
output.copyconfig) + rule run_all_scenarios: input: expand(