diff --git a/Snakefile b/Snakefile index 499eeae62..110abd7c8 100644 --- a/Snakefile +++ b/Snakefile @@ -14,6 +14,7 @@ from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider from scripts._helpers import create_country_list from scripts.build_demand_profiles import get_load_paths_gegis from scripts.retrieve_databundle_light import datafiles_retrivedatabundle +from scripts.monte_carlo import wildcard_creator from pathlib import Path HTTP = HTTPRemoteProvider() @@ -33,9 +34,8 @@ config["countries"] = create_country_list(config["countries"]) # create a list of iteration steps, required to solve the experimental design # each value is used as wildcard input e.g. solution_{unc} -config["scenario"]["unc"] = [ - f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"]) -] +if config["monte_carlo"].get("add_to_snakefile", False) == True: + config["scenario"]["unc"] = wildcard_creator(config) run = config.get("run", {}) RDIR = run["name"] + "/" if run.get("name") else "" @@ -666,41 +666,7 @@ def memory(w): return int(factor * (10000 + 195 * int(w.clusters))) -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False: - - rule solve_network: - input: - "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - output: - "results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - log: - solver=normpath( - "logs/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" - ), - python="logs/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", - memory="logs/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", - benchmark: - ( - "benchmarks/" - + RDIR - + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - ) - threads: 20 - resources: - mem=memory, - shadow: - "shallow" - script: - "scripts/solve_network.py" - - -if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: +if config["monte_carlo"].get("add_to_snakefile", False) == True: rule monte_carlo: input: @@ -723,18 +689,10 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: script: "scripts/monte_carlo.py" - rule solve_monte: - input: - expand( - "networks/" - + RDIR - + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", - **config["scenario"] - ), - rule solve_network: input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", + tech_costs=COSTS, output: "results/" + RDIR @@ -774,6 +732,40 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: **config["scenario"] ), +else: + + rule solve_network: + input: + "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + tech_costs=COSTS, + output: + "results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + log: + solver=normpath( + "logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log" + ), + python="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + memory="logs/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log", + benchmark: + ( + "benchmarks/" + + RDIR + + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" + ) + threads: 20 + resources: + mem=memory, + shadow: + "shallow" + script: + "scripts/solve_network.py" + def input_make_summary(w): # It's mildly hacky to include the separate costs input as first entry @@ -888,26 +880,29 @@ rule run_scenario: resources: mem_mb=5000, run: - from 
scripts.build_test_configs import create_test_config - import yaml - - # get base configuration file from diff config - with open(input.diff_config) as f: - base_config_path = ( - yaml.full_load(f) - .get("run", {}) - .get("base_config", "config.tutorial.yaml") - ) + from scripts.build_test_configs import ( + create_test_config, + _parse_inputconfig, + ) + from ruamel.yaml import YAML - # Ensure the scenario name matches the name of the configuration + # Ensure the scenario name matches the name of the configuration create_test_config( input.diff_config, {"run": {"name": wildcards.scenario_name}}, input.diff_config, ) # merge the default config file with the difference - create_test_config(base_config_path, input.diff_config, "config.yaml") - os.system("snakemake -j all solve_all_networks --rerun-incomplete") + create_test_config(input.default_config, input.diff_config, "config.yaml") + config = _parse_inputconfig("config.yaml", YAML()) + if config["monte_carlo"].get("add_to_snakefile", False) == True: + os.system( + "snakemake -j all solve_all_networks_monte --forceall --rerun-incomplete" + ) + else: + os.system( + "snakemake -j all solve_all_networks --forceall --rerun-incomplete" + ) os.system("snakemake -j1 make_statistics --force") copyfile("config.yaml", output.copyconfig) diff --git a/config.default.yaml b/config.default.yaml index 8de039363..68734bbd1 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -329,18 +329,20 @@ costs: monte_carlo: + add_to_snakefile: true options: - add_to_snakefile: false - samples: 7 # number of optimizations - sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported + method: "global_sensitivity" # or single_best_in_worst for technology assessment + samples: 7 # number of optimizations + sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: # User can add here flexibly more features for the Monte-Carlo sampling. # Given as "key: value" format # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering! # Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object - loads_t.p_set: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1] + # Examples: + # loads_t.p_set: [0.9, 1.1] + # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] + stores.capital_cost: [0.8, 1.2] # for single_best_in_worst solving: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 4c475fe2d..63f7f95ed 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -325,18 +325,20 @@ costs: monte_carlo: + add_to_snakefile: true options: - add_to_snakefile: false - samples: 7 # number of optimizations - sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported + method: "global_sensitivity" # or single_best_in_worst for technology assessment + samples: 7 # number of optimizations + sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported pypsa_standard: # User can add here flexibly more features for the Monte-Carlo sampling. # Given as "key: value" format # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering! 
  # Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
-    loads_t.p_set: [0.9, 1.1]
-    generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
-    generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
+    # Examples:
+    # loads_t.p_set: [0.9, 1.1]
+    # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
+    stores.capital_cost: [0.8, 1.2] # for single_best_in_worst

 solving:
diff --git a/doc/configtables/monte-carlo.csv b/doc/configtables/monte-carlo.csv
index efde31bd0..8233302d8 100644
--- a/doc/configtables/monte-carlo.csv
+++ b/doc/configtables/monte-carlo.csv
@@ -1,7 +1,8 @@
-,Unit,Values,Description
-options,,,
--- add_to_snakemake,,true or false,Add rule to Snakefile or not
--- samples,,"int** read description!", "Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number, that is not a quadrat of a prime, needs to be chosen. E.g. 7 not 9 (3^2)"
--- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design
-pypsa_standard,,,
--- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object."
+,Unit,Values,Description
+add_to_snakefile,,true or false,Add rule to Snakefile or not
+options,,,
+-- method,,"""global_sensitivity"" or ""single_best_in_worst""","""global_sensitivity"" creates scenarios in the latin hypercube space; ""single_best_in_worst"" creates scenarios where all values are pessimistic and only one is optimistic"
+-- samples,,integer,"Defines the number of total sample networks that will later be optimized. If orthogonal latin hypercube sampling is applied (default option in the scripts), a prime number that is not the square of a prime needs to be chosen, e.g. 7 not 9 (3^2)"
+-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Currently supported packages to create an experimental design
+pypsa_standard,,,
+-- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows access to any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication factors for each object. Method ""single_best_in_worst"" currently only supports store entries, e.g. stores.capital_cost: [0.7, 1.3]"
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 547f76b92..cc5ff5df3 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -613,6 +613,104 @@ def read_geojson(fn):
         return gpd.GeoDataFrame(geometry=[])
 
 
+def nested_storage_dict(tech_costs):
+    """
+    Create a nested dictionary with a storage index and meta data relation.
+
+    The costs.csv file from the technology_data interface contains metadata
+    for storage technologies when the PNNL data extraction is activated (PR #67).
+    The metadata is stored in the csv as a nested dictionary value, which is
+    read out by this function and converted to a nested dictionary for further
+    use. One example the metadata enables is the automatic energy storage
+    creation in the model from the config.yaml.
+ + Input: + ------ + tech_costs: str, path to technology costs.csv file + + Output: + ------- + nested_dict: dict, nested dictionary with storage index and meta data relation + storage_techs: list, list of unique storage technologies + + Example: + -------- + Data format in Input: + costs['further description'][0] -> {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} with index 'Ni-Zn-bicharger' + costs['further description'][1] -> {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} with index 'Ni-Zn-store' + + Output: + .. code-block:: python + { + 'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} + 'Ni-Zn-store': {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} + ... + } + """ + import ast + + df = pd.read_csv( + tech_costs, + index_col=["technology"], + usecols=["technology", "further description"], + ).sort_index() + df = df[df["further description"].str.contains("{'carrier':", na=False)] + storage_techs = df.index.unique() + nested_storage_dict = {} + if df.empty: + print("No storage technology found in costs.csv") + else: + for i in range(len(df)): + storage_dict = ast.literal_eval( + df.iloc[i, 0] + ) # https://stackoverflow.com/a/988251/13573820 + storage_dict.pop("note", None) + nested_storage_dict[df.index[i]] = storage_dict + return [nested_storage_dict, storage_techs] + + +def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs): + """ + Add storage specific columns e.g. "carrier", "type", "technology_type" to costs.csv + + Input: + ------ + costs: pd.DataFrame, costs.csv + storage_meta_dict: dict, nested dictionary with storage index and meta data relation + storage_techs: list, list of unique storage technologies + + Output: + ------- + costs: pd.DataFrame, costs.csv with added storage specific columns + + Example: + -------- + From the nested dictionary: + { + 'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} + ... 
+    }
+    The columns "carrier", "type", "technology_type" will be added to costs.csv
+    """
+    # add storage-specific columns to costs.csv
+    for c in ["carrier", "technology_type", "type"]:
+        costs.loc[storage_techs, c] = [
+            storage_meta_dict[X][c] for X in costs.loc[storage_techs].index
+        ]
+    # remove all 'elec' entries from the carrier column and flatten the list entries to strings
+    for i in range(len(costs.loc[storage_techs])):
+        costs.loc[storage_techs[i], "carrier"] = "".join(
+            [e for e in costs.loc[storage_techs].carrier.iloc[i] if e != "elec"]
+        )
+        costs.loc[storage_techs[i], "technology_type"] = "".join(
+            costs.loc[storage_techs].technology_type.iloc[i]
+        )
+        costs.loc[storage_techs[i], "type"] = "".join(
+            costs.loc[storage_techs].type.iloc[i]
+        )
+    return costs
+
+
 def create_country_list(input, iso_coding=True):
     """
     Create a country list for defined regions in config_osm_data.py
diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index ff42506e0..f2e3bb7d4 100755
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -92,7 +92,13 @@ import powerplantmatching as pm
 import pypsa
 import xarray as xr
-from _helpers import configure_logging, getContinent, update_p_nom_max
+from _helpers import (
+    add_storage_col_to_costs,
+    configure_logging,
+    getContinent,
+    nested_storage_dict,
+    update_p_nom_max,
+)
 from shapely.validation import make_valid
 from vresutils import transfer as vtransfer

@@ -147,6 +153,7 @@ def load_costs(tech_costs, config, elec_config, Nyears=1):

     costs = costs.value.unstack().fillna(config["fill_values"])

+    # set the capital cost for all components: generators, charger, discharger, store, ...
     costs["capital_cost"] = (
         (
             calculate_annuity(costs["lifetime"], costs["discount rate"])
@@ -181,27 +188,49 @@ def load_costs(tech_costs, config, elec_config, Nyears=1):
         + (1 - config["rooftop_share"]) * costs.at["solar-utility", "capital_cost"]
     )

-    def costs_for_storage(store, link1, link2=None, max_hours=1.0):
-        capital_cost = link1["capital_cost"] + max_hours * store["capital_cost"]
-        if link2 is not None:
-            capital_cost += link2["capital_cost"]
+    def costs_for_storageunit(store, link1, link2=pd.DataFrame(), max_hours=1.0):
+        capital_cost = float(link1["capital_cost"]) + float(max_hours) * float(
+            store["capital_cost"]
+        )
+        if not link2.empty:
+            capital_cost += float(link2["capital_cost"])
         return pd.Series(
             dict(capital_cost=capital_cost, marginal_cost=0.0, co2_emissions=0.0)
         )

     max_hours = elec_config["max_hours"]
-    costs.loc["battery"] = costs_for_storage(
+    costs.loc["battery"] = costs_for_storageunit(
         costs.loc["battery storage"],
         costs.loc["battery inverter"],
         max_hours=max_hours["battery"],
     )
-    costs.loc["H2"] = costs_for_storage(
+    costs.loc["H2"] = costs_for_storageunit(
         costs.loc["hydrogen storage tank"],
         costs.loc["fuel cell"],
         costs.loc["electrolysis"],
         max_hours=max_hours["H2"],
     )

+    storage_meta_dict, storage_techs = nested_storage_dict(tech_costs)
+    costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs)
+
+    # add capital_cost to all storage_units indexed by carrier, e.g. "lead" or "concrete"
"lead" or "concrete" + for c in costs.loc[storage_techs, "carrier"].unique(): + carrier = costs.carrier + tech_type = costs.technology_type + store_filter = (carrier == c) & (tech_type == "store") + charger_or_bicharger_filter = (carrier == c) & ( + (tech_type == "bicharger") | (tech_type == "charger") + ) + discharger_filter = (carrier == c) & (tech_type == "discharger") + costs.loc[c] = costs_for_storageunit( + costs.loc[store_filter], + costs.loc[charger_or_bicharger_filter], + costs.loc[discharger_filter], + max_hours=max_hours["battery"], + # TODO: max_hours data should be read as costs.loc[carrier==c,max_hours] (easy possible) + ) + for attr in ("marginal_cost", "capital_cost"): overwrites = config.get(attr) if overwrites is not None: diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 65f21f075..4eab0656a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -71,9 +71,10 @@ def attach_storageunits(n, costs, config): - elec_opts = config["electricity"] - carriers = elec_opts["extendable_carriers"]["StorageUnit"] - max_hours = elec_opts["max_hours"] + carriers = config["electricity"]["extendable_carriers"]["StorageUnit"] + carriers_classic = [x for x in carriers if x == "H2" or x == "battery"] + carriers_database = [x for x in carriers if x != "H2" and x != "battery"] + max_hours = config["electricity"]["max_hours"] _add_missing_carriers_from_costs(n, costs, carriers) @@ -82,7 +83,9 @@ def attach_storageunits(n, costs, config): lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} - for carrier in carriers: + # TODO: add metadata to H2 and battery to only read in over database interface + for carrier in carriers_classic: + roundtrip_correction = 0.5 if carrier == "battery" else 1 n.madd( "StorageUnit", buses_i, @@ -92,23 +95,61 @@ def attach_storageunits(n, costs, config): p_nom_extendable=True, capital_cost=costs.at[carrier, "capital_cost"], marginal_cost=costs.at[carrier, "marginal_cost"], - efficiency_store=costs.at[lookup_store[carrier], "efficiency"], - efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"], + efficiency_store=costs.at[lookup_store[carrier], "efficiency"] + ** roundtrip_correction, + efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"] + ** roundtrip_correction, max_hours=max_hours[carrier], cyclic_state_of_charge=True, ) + # automated attach of storage_units from database + for carrier in carriers_database: + tech_type = costs.technology_type + charger_or_bicharger_filter = (costs.carrier == carrier) & ( + (tech_type == "charger") | (tech_type == "bicharger") + ) + discharger_or_bicharger_filter = (costs.carrier == carrier) & ( + (tech_type == "discharger") | (tech_type == "bicharger") + ) + n.madd( + "StorageUnit", + buses_i, + " " + carrier, + bus=buses_i, + carrier=carrier, + p_nom_extendable=True, + capital_cost=costs.at[carrier, "capital_cost"], + marginal_cost=costs.at[carrier, "marginal_cost"], + efficiency_store=float( + costs.loc[charger_or_bicharger_filter, "efficiency"] + ), + efficiency_dispatch=float( + costs.loc[discharger_or_bicharger_filter, "efficiency"] + ), + max_hours=max_hours[carrier], + cyclic_state_of_charge=True, + ) -def attach_stores(n, costs, config): - elec_opts = config["electricity"] - carriers = elec_opts["extendable_carriers"]["Store"] +def attach_stores(n, costs, config): + """ + Add storage units as store and links. 
+
+    Two types of storage exist:
+    1. where every component can be independently sized, e.g. hydrogen
+    2. where charger and discharger are the same component, e.g. Li-battery (inverter)
+    """
+    carriers = config["electricity"]["extendable_carriers"]["Store"]
+    carriers_classic = [x for x in carriers if x == "H2" or x == "battery"]
+    carriers_database = [x for x in carriers if x != "H2" and x != "battery"]

     _add_missing_carriers_from_costs(n, costs, carriers)

     buses_i = n.buses.index
     bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}

-    if "H2" in carriers:
+    # TODO: add metadata to H2 and battery so they are read in only via the database interface
+    if "H2" in carriers_classic:
         h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)

         n.madd(
@@ -141,12 +182,11 @@ def attach_stores(n, costs, config):
             carrier="H2 fuel cell",
             p_nom_extendable=True,
             efficiency=costs.at["fuel cell", "efficiency"],
-            capital_cost=costs.at["fuel cell", "capital_cost"]
-            * costs.at["fuel cell", "efficiency"],
+            capital_cost=costs.at["fuel cell", "capital_cost"],
             marginal_cost=costs.at["fuel cell", "marginal_cost"],
         )

-    if "battery" in carriers:
+    if "battery" in carriers_classic:
         b_buses_i = n.madd(
             "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
         )
@@ -168,6 +208,7 @@ def attach_stores(n, costs, config):
             bus0=buses_i,
             bus1=b_buses_i,
             carrier="battery charger",
+            # NB: the efficiencies are "round trip efficiencies"
             efficiency=costs.at["battery inverter", "efficiency"],
             capital_cost=costs.at["battery inverter", "capital_cost"],
             p_nom_extendable=True,
@@ -180,15 +221,78 @@ def attach_stores(n, costs, config):
             bus0=b_buses_i,
             bus1=buses_i,
             carrier="battery discharger",
+            # NB: the efficiencies are "round trip efficiencies"
             efficiency=costs.at["battery inverter", "efficiency"],
             p_nom_extendable=True,
             marginal_cost=costs.at["battery inverter", "marginal_cost"],
         )

+    # automated attach of store-link storages from the database
+    for c in carriers_database:
+        carrier_buses_i = n.madd("Bus", buses_i + f" {c}", carrier=c, **bus_sub_dict)
+        tech_type = costs.technology_type
+        carrier = costs.carrier
+        # filter for the pypsa store
+        store_filter = (carrier == c) & (tech_type == "store")
+        # filter for the pypsa charger or bicharger
+        charger_or_bicharger_filter = (carrier == c) & (
+            (tech_type == "charger") | (tech_type == "bicharger")
+        )
+        # filter for the pypsa discharger or bicharger
+        discharger_or_bicharger_filter = (carrier == c) & (
+            (tech_type == "discharger") | (tech_type == "bicharger")
+        )
+        n.madd(
+            "Store",
+            carrier_buses_i,
+            bus=carrier_buses_i,
+            carrier=c,
+            e_nom_extendable=True,
+            e_cyclic=True,
+            capital_cost=float(costs.loc[store_filter, "capital_cost"]),
+        )
+
+        n.madd(
+            "Link",
+            carrier_buses_i + " charger",
+            bus0=buses_i,
+            bus1=carrier_buses_i,
+            carrier=f"{c} charger",
+            p_nom_extendable=True,
+            efficiency=float(costs.loc[charger_or_bicharger_filter, "efficiency"]),
+            capital_cost=float(costs.loc[charger_or_bicharger_filter, "capital_cost"]),
+            marginal_cost=float(
+                costs.loc[charger_or_bicharger_filter, "marginal_cost"]
+            ),
+        )
+
+        # the capital cost of the discharger is set to zero for a bicharger, since it is already included in the charger link
+        if (
+            costs.loc[discharger_or_bicharger_filter].technology_type.item()
+            == "bicharger"
+        ):
+            zero_or_value = 0
+        else:
+            zero_or_value = float(
+                costs.loc[discharger_or_bicharger_filter, "capital_cost"]
+            )
+        n.madd(
+            "Link",
+            carrier_buses_i + " discharger",
+            bus0=carrier_buses_i,
+            bus1=buses_i,
+            carrier=f"{c} discharger",
+            p_nom_extendable=True,
+            efficiency=float(costs.loc[discharger_or_bicharger_filter, "efficiency"]),
+            capital_cost=zero_or_value,
+            marginal_cost=float(
+                costs.loc[discharger_or_bicharger_filter, "marginal_cost"]
+            ),
+        )
+

 def attach_hydrogen_pipelines(n, costs, config):
-    elec_opts = config["electricity"]
-    ext_carriers = elec_opts["extendable_carriers"]
+    ext_carriers = config["electricity"]["extendable_carriers"]
     as_stores = ext_carriers.get("Store", [])

     if "H2 pipeline" not in ext_carriers.get("Link", []):
@@ -250,7 +354,7 @@ def attach_hydrogen_pipelines(n, costs, config):
     attach_stores(n, costs, config)
     attach_hydrogen_pipelines(n, costs, config)

-    add_nice_carrier_names(n, config=snakemake.config)
+    add_nice_carrier_names(n, config=config)

-    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
+    n.meta = dict(config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py
index dce7e609a..b6d26bbea 100644
--- a/scripts/monte_carlo.py
+++ b/scripts/monte_carlo.py
@@ -78,6 +78,21 @@
 logger = logging.getLogger(__name__)


+def wildcard_creator(config, method=None):
+    """
+    Create wildcards for the Monte Carlo simulations.
+    """
+    method = method or config["monte_carlo"]["options"].get("method")
+    if method == "global_sensitivity":
+        return [f"g{i}" for i in range(config["monte_carlo"]["options"]["samples"])]
+
+    elif method == "single_best_in_worst":
+        return [
+            f"a{i}"
+            for i in range(len(config["electricity"]["extendable_carriers"]["Store"]))
+        ]
+
+
 def monte_carlo_sampling_pydoe2(
     N_FEATURES,
     SAMPLES,
@@ -159,6 +174,48 @@ def monte_carlo_sampling_scipy(
     return lh


+def single_best_in_worst_list(worst_list, best_list):
+    """
+    Return a list of scenarios, each with a single best value among worst values.
+
+    The goal is to create a list of scenarios where each scenario
+    has one best value and the rest are worst cases. This is useful for
+    assessing the impact of a single technology on the system.
+    E.g. if a technology is not built even with its best values, it is
+    likely not relevant to include in the optimization or
+    global sensitivity analysis.
+
+    What is good and what is bad is determined by the user.
+    See the example below for clarification of the use cases.
+
+    Input:
+    ------
+    worst_list: 1D array, represents the worst value per technology
+    best_list: 1D array, represents the best value per technology
+
+    Output:
+    -------
+    new_list: 2D array
+
+    Example:
+    --------
+    Let's create a scenario set with 3 technologies (creates 3 scenarios)
+    # Scenario set when low values are good e.g. costs
+    >>> single_best_in_worst_list([4, 4, 4], [1, 1, 1])
+    [[1, 4, 4], [4, 1, 4], [4, 4, 1]]
+    # Scenario set when high values are good e.g. efficiency
+    >>> single_best_in_worst_list([1, 1, 1], [4, 4, 4])
+    [[4, 1, 1], [1, 4, 1], [1, 1, 4]]
+    """
+    new_list = []
+    for i in range(len(worst_list)):
+        scenario = worst_list.copy()
+        scenario[i] = best_list[i]
+        new_list.append(scenario)
+
+    return new_list
+
+
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -174,72 +231,97 @@
         )
     configure_logging(snakemake)
     config = snakemake.config
+    n = pypsa.Network(snakemake.input[0])

-    ### SCENARIO INPUTS
-    ###
-    MONTE_CARLO_PYPSA_FEATURES = {
+    PYPSA_FEATURES = {
         k: v for k, v in config["monte_carlo"]["pypsa_standard"].items() if v
     }  # removes key value pairs with empty value e.g. []
-    MONTE_CARLO_OPTIONS = config["monte_carlo"]["options"]
-    L_BOUNDS = [item[0] for item in MONTE_CARLO_PYPSA_FEATURES.values()]
-    U_BOUNDS = [item[1] for item in MONTE_CARLO_PYPSA_FEATURES.values()]
-    N_FEATURES = len(
-        MONTE_CARLO_PYPSA_FEATURES
-    )  # only counts features when specified in config
-    SAMPLES = MONTE_CARLO_OPTIONS.get(
-        "samples"
-    )  # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper
-    SAMPLING_STRATEGY = MONTE_CARLO_OPTIONS.get("sampling_strategy")
-
-    ### SCENARIO CREATION / SAMPLING STRATEGY
-    ###
-    if SAMPLING_STRATEGY == "pydoe2":
-        lh = monte_carlo_sampling_pydoe2(
-            N_FEATURES,
-            SAMPLES,
-            criterion=None,
-            iteration=None,
-            random_state=42,
-            correlation_matrix=None,
-        )
-    if SAMPLING_STRATEGY == "scipy":
-        lh = monte_carlo_sampling_scipy(
-            N_FEATURES,
-            SAMPLES,
-            centered=False,
-            strength=2,
-            optimization=None,
-            seed=42,
-        )
-    if SAMPLING_STRATEGY == "chaospy":
-        lh = monte_carlo_sampling_chaospy(
-            N_FEATURES,
-            SAMPLES,
-            rule="latin_hypercube",
-            seed=42,
-        )
-    lh_scaled = qmc.scale(lh, L_BOUNDS, U_BOUNDS)
+    OPTIONS = config["monte_carlo"]["options"]
+    L_BOUNDS = [item[0] for item in PYPSA_FEATURES.values()]
+    U_BOUNDS = [item[1] for item in PYPSA_FEATURES.values()]
+    N_FEATURES = len(PYPSA_FEATURES)  # only counts features when specified in config

-    ### MONTE-CARLO MODIFICATIONS
+    ### SCENARIO CREATION
+    ###
+    if OPTIONS.get("method") == "global_sensitivity":
+        SAMPLES = OPTIONS.get(
+            "samples"
+        )  # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper
+        SAMPLING_STRATEGY = OPTIONS.get("sampling_strategy")
+
+        if SAMPLING_STRATEGY == "pydoe2":
+            lh = monte_carlo_sampling_pydoe2(
+                N_FEATURES,
+                SAMPLES,
+                criterion=None,
+                iteration=None,
+                random_state=42,
+                correlation_matrix=None,
+            )
+        if SAMPLING_STRATEGY == "scipy":
+            lh = monte_carlo_sampling_scipy(
+                N_FEATURES,
+                SAMPLES,
+                centered=False,
+                strength=2,
+                optimization=None,
+                seed=42,
+            )
+        if SAMPLING_STRATEGY == "chaospy":
+            lh = monte_carlo_sampling_chaospy(
+                N_FEATURES,
+                SAMPLES,
+                rule="latin_hypercube",
+                seed=42,
+            )
+        scenarios = qmc.scale(lh, L_BOUNDS, U_BOUNDS)
+
+    elif OPTIONS.get("method") == "single_best_in_worst":
+        carrier_no = len(n.stores.carrier.unique())
+        worst_list = U_BOUNDS * carrier_no
+        best_list = L_BOUNDS * carrier_no
+        scenarios = single_best_in_worst_list(worst_list, best_list)
+
+    ### NETWORK MODIFICATION
     ###
-    n = pypsa.Network(snakemake.input[0])
     unc_wildcards = snakemake.wildcards[-1]
     i = int(unc_wildcards[1:])
     j = 0
-    for k, v in MONTE_CARLO_PYPSA_FEATURES.items():
-        # this loop sets in one scenario each "i" feature assumption
-        # k is the config input key "loads_t.p_set"
-        # v is the lower and upper bound [0.8,1.3], that was used for lh_scaled
-        # i, j interaction number to pick values of experimental setup
-        # Example: n.loads_t.p_set = network.loads_t.p_set = .loads_t.p_set * lh_scaled[0,0]
-        exec(f"n.{k} = n.{k} * {lh_scaled[i,j]}")
-        logger.info(f"Scaled n.{k} by factor {lh_scaled[i,j]} in the {i} scenario")
-        j = j + 1
+
+    if OPTIONS.get("method") == "global_sensitivity":
+        for k, v in PYPSA_FEATURES.items():
+            # in scenario "i", this loop sets each feature assumption
+            # k is the config input key "loads_t.p_set"
+            # v is the lower and upper bound [0.8, 1.3] that was used for the scenarios
+            # i, j are the iteration numbers to pick values of the experimental setup
+            # Example: n.loads_t.p_set = n.loads_t.p_set * scenarios[0,0]
+            exec(f"n.{k} = n.{k} * {scenarios[i,j]}")
+            logger.info(f"Scaled n.{k} by factor {scenarios[i,j]} in the {i} scenario")
+            j = j + 1
+
+    if OPTIONS.get("method") == "single_best_in_worst":
+        for k, _ in PYPSA_FEATURES.items():
+            component = k.split(".")[0]  # "stores", "generators", ...
+            feature = k.split(".")[1]  # "capital_cost", "efficiency", ...
+
+            # TODO: Generalize for other features. Currently this scales the whole storage-chain
+            if component == "stores":
+                # scales the whole storage-chain
+                carrier_list = n.stores.carrier.unique()
+                for c in carrier_list:
+                    n.stores.loc[n.stores.carrier == c, feature] *= scenarios[i][j]
+                    n.links.loc[n.links.carrier.str.contains(c), feature] *= scenarios[
+                        i
+                    ][j]
+                    logger.info(
+                        f"Scaled {feature} for carrier={c} of store and links by factor {scenarios[i][j]} in the {i} scenario"
+                    )
+                    j += 1

     ### EXPORT AND METADATA
-    #
-    latin_hypercube_dict = (
-        pd.DataFrame(lh_scaled).rename_axis("Nruns").add_suffix("_feature")
+    ###
+    scenario_dict = (
+        pd.DataFrame(scenarios).rename_axis("iteration").add_suffix("_feature")
     ).to_dict()
-    n.meta.update(latin_hypercube_dict)
+    n.meta.update(scenario_dict)
     n.export_to_netcdf(snakemake.output[0])
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index 948266755..4dfcafeed 100755
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -85,6 +85,7 @@ import pandas as pd
 import pypsa

 from _helpers import configure_logging
+from add_electricity import load_costs
 from pypsa.descriptors import get_switchable_as_dense as get_as_dense
 from pypsa.linopf import (
     define_constraints,
@@ -346,6 +347,27 @@ def add_battery_constraints(n):
     define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")


+def add_bicharger_constraints(n, costs):
+    tech_type = costs.technology_type
+    carriers_bicharger = list(costs.loc[(tech_type == "bicharger"), "carrier"].unique())
+    carriers_config = n.config["electricity"]["extendable_carriers"]["Store"]
+    carriers_intersect = list(set(carriers_bicharger) & set(carriers_config))
+    for c in carriers_intersect:
+        nodes = n.buses.index[n.buses.carrier == c]
+        if nodes.empty or ("Link", "p_nom") not in n.variables.index:
+            continue
+        link_p_nom = get_var(n, "Link", "p_nom")
+        lhs = linexpr(
+            (1, link_p_nom[nodes + " charger"]),
+            (
+                -n.links.loc[nodes + " discharger", "efficiency"].values,
+                link_p_nom[nodes + " discharger"].values,
+            ),
+        )
+        constraint_name = f"charger_ratio_{c}"
+        define_constraints(n, lhs, "=", 0, "Link", constraint_name)
+
+
 def extra_functionality(n, snapshots):
     """
     Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``.
@@ -354,6 +376,13 @@ def extra_functionality(n, snapshots): """ opts = n.opts config = n.config + Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + costs = load_costs( + snakemake.input.tech_costs, + config["costs"], + config["electricity"], + Nyears, + ) if "BAU" in opts and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) if "SAFE" in opts and n.generators.p_nom_extendable.any(): @@ -367,6 +396,7 @@ def extra_functionality(n, snapshots): if "EQ" in o: add_EQ_constraints(n, o) add_battery_constraints(n) + add_bicharger_constraints(n, costs) def solve_network(n, config, opts="", **kwargs): @@ -387,7 +417,7 @@ def solve_network(n, config, opts="", **kwargs): solver_name=solver_name, solver_options=solver_options, extra_functionality=extra_functionality, - **kwargs + **kwargs, ) else: ilopf( @@ -398,7 +428,7 @@ def solve_network(n, config, opts="", **kwargs): min_iterations=min_iterations, max_iterations=max_iterations, extra_functionality=extra_functionality, - **kwargs + **kwargs, ) return n @@ -411,9 +441,9 @@ def solve_network(n, config, opts="", **kwargs): snakemake = mock_snakemake( "solve_network", simpl="", - clusters="54", + clusters="6", ll="copt", - opts="Co2L-1H", + opts="Co2L-4H", ) configure_logging(snakemake) diff --git a/test/config.monte_carlo.yaml b/test/config.monte_carlo.yaml index b51e53819..e92050c7f 100644 --- a/test/config.monte_carlo.yaml +++ b/test/config.monte_carlo.yaml @@ -7,5 +7,4 @@ retrieve_databundle: # required to be "false" for nice CI test output show_progress: false monte_carlo: - options: - add_to_snakefile: true + add_to_snakefile: true
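
For reviewers who want to trace the `global_sensitivity` path without running the workflow, here is a minimal sketch of what `monte_carlo.py` effectively does with the configured bounds. The feature names, bounds, and sample count below are example values, not taken from a real config; only `scipy.stats.qmc` is assumed.

```python
# Minimal sketch of the "global_sensitivity" scenario creation, assuming the
# scipy sampling strategy. Features and bounds are illustrative only.
from scipy.stats import qmc

features = {
    "loads_t.p_set": [0.9, 1.1],
    "stores.capital_cost": [0.8, 1.2],
}
l_bounds = [v[0] for v in features.values()]
u_bounds = [v[1] for v in features.values()]

# 7 samples x 2 features in [0, 1), analogous to monte_carlo_sampling_scipy
lh = qmc.LatinHypercube(d=len(features), seed=42).random(n=7)
# scale to the configured bounds -> one row of multiplication factors per scenario
scenarios = qmc.scale(lh, l_bounds, u_bounds)

# the network built for wildcard "g3" scales feature j by scenarios[3, j]
print(scenarios[3])
```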
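Similarly, the `single_best_in_worst` scenario set can be reproduced by hand. The carrier names below are made up for the example; for costs, the lower bound is the "best" value, so each scenario keeps every storage chain pessimistic except one.

```python
# Illustrative scenario set for stores.capital_cost: [0.8, 1.2] with three
# store carriers (carrier names are assumptions for the example).
carriers = ["H2", "battery", "nizn"]
worst = [1.2] * len(carriers)  # pessimistic cost factor for every chain
best = [0.8] * len(carriers)   # optimistic cost factor

# one scenario per carrier: all pessimistic except carrier i
scenarios = [worst[:i] + [best[i]] + worst[i + 1:] for i in range(len(carriers))]
# -> [[0.8, 1.2, 1.2], [1.2, 0.8, 1.2], [1.2, 1.2, 0.8]]
# wildcard "a0" therefore solves the network where only the H2 chain is cheap
print(scenarios)
```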
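The storage metadata round-trip in `_helpers.py` is also easy to check in isolation; the cell content below is example data in the format that the `nested_storage_dict` docstring describes.

```python
# Sketch of the metadata parsing in nested_storage_dict: the "further
# description" column of costs.csv carries a dict literal per storage
# technology, which ast.literal_eval turns back into a dict.
import ast

cell = "{'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']}"
meta = ast.literal_eval(cell)  # safe parsing of the python literal
meta.pop("note", None)  # free-text notes are dropped, as in nested_storage_dict
assert meta["technology_type"] == ["bicharger"]
```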