diff --git a/.github/workflows/nompi4py.yml b/.github/workflows/nompi4py.yml index d2e8b9871..8918fac1c 100644 --- a/.github/workflows/nompi4py.yml +++ b/.github/workflows/nompi4py.yml @@ -24,12 +24,12 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - pip install pyomo sphinx sphinx_rtd_theme cplex - pip install xpress numpy pandas scipy dill + pip install sphinx sphinx_rtd_theme cplex + pip install xpress pandas dill - name: setup the program run: | - python setup.py develop + pip install -e . - name: PH EF tests run: | diff --git a/.github/workflows/pull_push_regression.yml b/.github/workflows/pull_push_regression.yml index 6ee65f648..de321fa90 100644 --- a/.github/workflows/pull_push_regression.yml +++ b/.github/workflows/pull_push_regression.yml @@ -35,7 +35,7 @@ jobs: - name: setup the program run: | - python setup.py develop + pip install -e . - name: Test EF/PH run: | diff --git a/.github/workflows/pyotracker.yml b/.github/workflows/pyotracker.yml index f6fb9bdbc..5c8e59540 100644 --- a/.github/workflows/pyotracker.yml +++ b/.github/workflows/pyotracker.yml @@ -30,7 +30,6 @@ jobs: conda install mpi4py pandas setuptools git pip install sphinx sphinx_rtd_theme cplex pip install xpress - pip install numpy pip install matplotlib - name: set up pyomo diff --git a/.github/workflows/schur_complement.yml b/.github/workflows/schur_complement.yml index 2f6cf47d0..8df9de5e0 100644 --- a/.github/workflows/schur_complement.yml +++ b/.github/workflows/schur_complement.yml @@ -25,18 +25,18 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install numpy scipy nose pybind11 + pip install nose pybind11 conda install openmpi pymumps --no-update-deps pip install mpi4py pandas pip install git+https://github.com/pyutilib/pyutilib.git git clone https://github.com/pyomo/pyomo.git cd pyomo/ - python setup.py develop + pip install -e . pyomo download-extensions pyomo build-extensions cd ../ pip install git+https://github.com/parapint/parapint.git - python setup.py develop + pip install -e . - name: Test with nose run: | nosetests -v mpisppy/tests/test_sc.py diff --git a/.github/workflows/straight.yml b/.github/workflows/straight.yml index c8ff9eb41..2818b876b 100644 --- a/.github/workflows/straight.yml +++ b/.github/workflows/straight.yml @@ -31,7 +31,7 @@ jobs: - name: setup the program run: | - python setup.py develop + pip install -e . - name: mpi tests run: | diff --git a/.github/workflows/testadmmWrapper.yml b/.github/workflows/testadmmWrapper.yml new file mode 100644 index 000000000..c048e32d5 --- /dev/null +++ b/.github/workflows/testadmmWrapper.yml @@ -0,0 +1,42 @@ +# aph (pyomo released) + +name: admmWrapper tests + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +defaults: + run: + shell: bash -l {0} + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: conda-incubator/setup-miniconda@v2 + with: + activate-environment: test_env + python-version: 3.9 + auto-activate-base: false + - name: Install dependencies + run: | + conda install mpi4py pandas setuptools + pip install pyomo xpress cplex + pip install numpy + + - name: setup the program + run: | + pip install -e . 
+ + - name: run tests + timeout-minutes: 100 + run: | + cd mpisppy/tests + # envall does nothing + python test_admmWrapper.py diff --git a/.github/workflows/testaph.yml b/.github/workflows/testaph.yml index 44319a9f8..0a5e68e55 100644 --- a/.github/workflows/testaph.yml +++ b/.github/workflows/testaph.yml @@ -28,11 +28,10 @@ jobs: run: | conda install mpi4py pandas setuptools pip install pyomo xpress cplex - pip install numpy - name: setup the program run: | - python setup.py develop + pip install -e . - name: run tests timeout-minutes: 100 diff --git a/.github/workflows/testbunpick.yml b/.github/workflows/testbunpick.yml index b5b2d9f36..dd4036a6e 100644 --- a/.github/workflows/testbunpick.yml +++ b/.github/workflows/testbunpick.yml @@ -26,12 +26,12 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy sympy dill PyYAML Pympler networkx pandas - name: setup the program run: | - python setup.py develop + pip install -e . - name: run pickled bundles tests timeout-minutes: 10 diff --git a/.github/workflows/testconfint.yml b/.github/workflows/testconfint.yml index acf314726..c4ae0a1b6 100644 --- a/.github/workflows/testconfint.yml +++ b/.github/workflows/testconfint.yml @@ -26,12 +26,12 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy sympy dill - name: setup the program run: | - python setup.py develop + pip install -e . - name: run farmer tests timeout-minutes: 10 @@ -43,4 +43,4 @@ jobs: timeout-minutes: 10 run: | cd mpisppy/tests - python test_conf_int_aircond.py \ No newline at end of file + python test_conf_int_aircond.py diff --git a/.github/workflows/testgradient.yml b/.github/workflows/testgradient.yml index f71746f6c..018d185cb 100644 --- a/.github/workflows/testgradient.yml +++ b/.github/workflows/testgradient.yml @@ -24,12 +24,12 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools cmake + conda install mpi4py "numpy<2" setuptools cmake pip install pyomo pandas xpress cplex scipy sympy dill - name: setup the program run: | - python setup.py develop + pip install -e . - name: Build Pyomo extensions run: | diff --git a/.github/workflows/testpysp.yml b/.github/workflows/testpysp.yml index e7a139517..f0d64d2a0 100644 --- a/.github/workflows/testpysp.yml +++ b/.github/workflows/testpysp.yml @@ -29,11 +29,10 @@ jobs: run: | conda install mpi4py pandas setuptools pytest pyyaml networkx pip install pyomo xpress cplex - pip install numpy - name: setup the program run: | - python setup.py develop + pip install -e . - name: run pysp model tests timeout-minutes: 100 diff --git a/.github/workflows/testwithcylinders.yml b/.github/workflows/testwithcylinders.yml index 60f2fb74c..36a4aa403 100644 --- a/.github/workflows/testwithcylinders.yml +++ b/.github/workflows/testwithcylinders.yml @@ -26,12 +26,12 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy - name: setup the program run: | - python setup.py develop + pip install -e . 
- name: run tests
        timeout-minutes: 10

diff --git a/README.rst b/README.rst
index f5608aa41..94dc9756a 100644
--- a/README.rst
+++ b/README.rst
@@ -9,7 +9,7 @@ a there is a `paper `_ which creates the model for each scenario.
+
+.. py:function:: scenario_creator(scenario_name)
+
+   Creates the model, which should include the consensus variables.
+   However, this function should not create any tree.
+
+   Args:
+      scenario_name (str): the name of the scenario that will be created.
+
+   Returns:
+      Pyomo ConcreteModel: the instantiated model
+
+The driver file also requires helper arguments that are used in mpi-sppy. They are detailed `in helper_functions `_
+and in the example below.
+Here is a summary:
+
+* ``scenario_creator_kwargs`` (dict[str]): keyword arguments needed in ``scenario_creator``
+
+* ``all_scenario_names`` (list of str): the subproblem names
+
+* A function that is called at termination in some modules (e.g. PH)
+
+  .. py:function:: scenario_denouement
+
+     Args:
+        rank (int): rank in the cylinder
+
+        scenario_name (str): name of the scenario
+
+        scenario (Pyomo ConcreteModel): the instantiated model
+
+
+.. note::
+
+   Subproblems will be represented in mpi-sppy by scenarios. Consensus variables ensure that the problem constraints are
+   respected; they are represented in mpi-sppy by non-anticipative variables.
+   The following terms are associated: subproblems = scenarios (= regions in the example),
+   and nonants = consensus-vars (= flow among regions in the example)
+
+
+The driver also needs global information to link the subproblems.
+It should be provided in ``consensus_vars``.
+
+``consensus_vars`` (dictionary):
+   * Keys are the subproblems
+   * Values are the list of consensus variables
+
+.. note::
+
+   Every variable in ``consensus_vars[subproblem]`` should also appear as a variable of the subproblem.
+
+Using the config system
++++++++++++++++++++++++
+
+In addition to the previously presented data, the driver also requires arguments to create the PH Model and solve it.
+Some arguments may be passed by the user via config, but the cylinders need to be added.
+.. TBD add external link on the previous line
+
+
+Direct solver of the extensive form
++++++++++++++++++++++++++++++++++++
+``distr_ef.py`` can be used as a verification or debugging tool for small instances.
+It directly solves the extensive form using the wrapper ``scenario_creator`` from ``admmWrapper``.
+It has the advantage of requiring the same arguments as ``distr_admm_cylinders`` because both build on the same model.
+
+This method offers verification for small instances, but is slower than ``admmWrapper``
+as it doesn't decompose the problem.
+
+
+.. note::
+
+   ``admmWrapper`` does not yet collect instantiation time.
+
+Distribution example
+--------------------
+``examples.distr.distr.py`` is an example of a model creator for admmWrapper: a (linear) inter-region minimal cost distribution problem.
+``distr_admm_cylinders.py`` is the driver.
+
+Original data dictionaries
++++++++++++++++++++++++++++
+The data is created in ``distr_data.py``. Some models are hardwired for 2, 3 and 4 regions.
+Other models are created pseudo-randomly thanks to parameters defined in ``data_params.json``.
+
+In the example the ``inter_region_dict_creator`` (or ``scalable_inter_region_dict_creator``) creates the inter-region information.
+
+.. autofunction:: examples.distr.distr.inter_region_dict_creator
+
+The ``region_dict_creator`` (or ``scalable_region_dict_creator``) creates the information specific to a region,
+regardless of the other regions.
+
+.. autofunction:: examples.distr.distr.region_dict_creator
+
+Adapting the data to create the model
++++++++++++++++++++++++++++++++++++++
+
+To solve the regions independently we must make sure that the constraints (in our example flow balance) would still be respected if the
+models were merged. To impose this, consensus variables are introduced.
+
+In our example a consensus variable is the flow among regions. Indeed, in each regional model we introduce the inter-region arcs
+for which either the source or target is in the region to impose the flow balance rule inside the region. But at this stage, nothing
+ensures that the flow from DC1 to DC2 represented in Region 1 is the same as the flow from DC1 to DC2 represented in Region 2.
+That is why the flow ``flow["DC1DC2"]`` is a consensus variable in both regions: to ensure it is the same.
+
+The purpose of ``examples.distr.distr.inter_arcs_adder`` is to do exactly that.
+
+.. autofunction:: examples.distr.distr.inter_arcs_adder
+
+.. note::
+
+   In the example the cost of transport is chosen to be split equally between the source region and the target region.
+
+   We here represent the flow problem with a directed graph. If, in addition to the flow from DC1 to DC2 represented by ``flow["DC1DC2"]``,
+   a flow from DC2 to DC1 were to be authorized we would also have ``flow["DC2DC1"]`` in both regions.
+
+Once the local_dict is created, the Pyomo model can be created thanks to ``min_cost_distr_problem``.
+
+.. autofunction:: examples.distr.distr.min_cost_distr_problem
+
+.. _sectiondatafordriver:
+
+Transforming data for the driver
+++++++++++++++++++++++++++++++++
+
+The driver requires five elements given by the model: ``all_scenario_names``, ``scenario_creator``, ``scenario_creator_kwargs``,
+``inparser_adder`` and ``consensus_vars``.
+
+``all_scenario_names`` (see above) is given by ``scenario_names_creator``.
+
+``scenario_creator`` is created thanks to the previous functions.
+
+.. autofunction:: examples.distr.distr.scenario_creator
+
+The dictionary ``scenario_creator_kwargs`` is created with
+
+.. autofunction:: examples.distr.distr.kw_creator
+
+The function ``inparser_adder`` requires the user to give ``num_scens`` (the number of regions) during the configuration.
+
+.. autofunction:: examples.distr.distr.inparser_adder
+
+Contrary to the other helper functions, ``consensus_vars_creator`` is specific to admmWrapper.
+The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary.
+
+.. autofunction:: examples.distr.distr.consensus_vars_creator
+
+Understanding the driver
+++++++++++++++++++++++++
+In the example the driver gets arguments from the command line through the function ``_parse_args``
+
+.. py:function:: _parse_args()
+
+   Gets arguments from the command line and adds them to a config object. Some arguments are required.
+
+
+.. note::
+
+   Some arguments, such as ``cfg.run_async`` and all the methods creating new cylinders,
+   not only need to be added in the ``_parse_args()`` method, but also need to be called later in the driver.
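+
+For instance, the command in ``examples/distr/go.bash`` (reproduced below) runs the driver with a PH hub and
+two spokes selected by the ``--xhatxbar`` and ``--lagrangian`` flags, hence the three ranks::
+
+   mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 4 --rel-gap 0.05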
+
+
+Non local solvers
++++++++++++++++++
+
+The files ``globalmodel.py`` and ``distr_ef.py`` are used for debugging or learning. They don't rely on PH or ADMM; they simply solve the
+problem without decomposition.
+
+* ``globalmodel.py``
+
+  In ``globalmodel.py``, ``global_dict_creator`` merges the data into a global dictionary
+  thanks to the inter-region dictionary, without creating inter_region_arcs. Then the model is created (as if there was
+  only one region without arcs leaving it) and solved.
+
+  However, this file depends on the structure of the problem and doesn't decompose the problem.
+  Luckily, in this example the model creator is the same as in distr, because the changes for consensus-vars are negligible.
+  In general, however, this file may be difficult to adapt and inefficient.
+
+* ``distr_ef.py``
+
+  As presented previously, it solves the extensive form.
+  The arguments are the same as for ``distr_admm_cylinders.py``; the method doesn't need to be adapted to the model.
+
diff --git a/doc/src/helper_functions.rst b/doc/src/helper_functions.rst
new file mode 100644
index 000000000..38aeeddfa
--- /dev/null
+++ b/doc/src/helper_functions.rst
@@ -0,0 +1,46 @@
+.. _helper_functions:
+
+Helper functions in the model file
+==================================
+
+The `scenario_creator function `_ is required, but some modules also need helper functions in the model file.
+
+All these functions can be found in the example ``examples.distr.distr.py`` and
+are documented in `the admm wrapper documentation `_
+
+.. py:function:: kw_creator(cfg)
+
+   Args:
+      cfg (config object): specifications for the problem
+
+   Returns:
+      dict (str): the dictionary of keyword arguments that is used in ``scenario_creator``
+
+
+.. py:function:: scenario_names_creator(num_scens, start=0)
+
+   Creates the names of the ``num_scens`` scenarios starting from ``start``
+
+   Args:
+      num_scens (int): number of scenarios
+      start (int): starting index for the names
+
+   Returns:
+      list (str): the list of names
+
+.. py:function:: inparser_adder(cfg)
+
+   Adds arguments to the config object which are specific to the problem
+
+   Args:
+      cfg (config object): specifications for the problem given in the command line
+
+Some modules (e.g. PH) call this function at termination
+
+.. py:function:: scenario_denouement
+
+   Args:
+      rank (int): rank in the cylinder
+
+      scenario_name (str): name of the scenario
+
+      scenario (Pyomo ConcreteModel): the instantiated model
diff --git a/doc/src/index.rst b/doc/src/index.rst
index acc6df13a..588951a1c 100644
--- a/doc/src/index.rst
+++ b/doc/src/index.rst
@@ -32,6 +32,8 @@ MPI is used.
    grad_rho.rst
    w_rho.rst
    agnostic.rst
+   admmWrapper.rst
+   helper_functions.rst
    contributors.rst
    internals.rst
    api.rst
diff --git a/doc/src/secretmenu.rst b/doc/src/secretmenu.rst
index fae91205d..fcad0e260 100644
--- a/doc/src/secretmenu.rst
+++ b/doc/src/secretmenu.rst
@@ -48,3 +48,11 @@ hub definition dictionary is called ``hub_dict`` you would add
 
    hub_dict["opt_kwargs"]["PHoptions"]["initial_proximal_cut_count"] = 4
 
 before passing ``hub_dict`` to ``spin_the_wheel``.
+
+
+subgradient_while_waiting
+-------------------------
+
+The Lagrangian spoke has an additional argument, `subgradient_while_waiting`,
+which will compute subgradient steps while it is waiting on new W's from the
+hub.
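+
+A minimal sketch (an illustration, not tested here): assuming your Lagrangian spoke
+dictionary is called ``lagrangian_spoke``, as when it is returned by
+``vanilla.lagrangian_spoke``, you would add
+
+::
+
+   lagrangian_spoke["opt_kwargs"]["options"]["subgradient_while_waiting"] = True
+
+before passing it to ``spin_the_wheel``; the spoke reads this flag from its
+options dictionary when no new W's have arrived from the hub.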
diff --git a/examples/distr/data_params.json b/examples/distr/data_params.json new file mode 100644 index 000000000..7a00b0c95 --- /dev/null +++ b/examples/distr/data_params.json @@ -0,0 +1,45 @@ +{ + "revenues mean": 1200, + "revenues cv": 300, + "production costs mean": 400, + "production costs cv": 100, + "supply factory mean": 150, + "supply factory cv": 50, + "supply buyer mean": 170, + "supply buyer cv": 40, + "arc_F_B": { + "prob": 0.4, + "mean cost": 800, + "cv cost": 200, + "mean capacity": 50, + "cv capacity": 10 + }, + "arc_F_DC": { + "prob": 0.8, + "mean cost": 400, + "cv cost": 250, + "mean capacity": 100, + "cv capacity": 30 + }, + "arc_DC_DC": { + "prob": 0.6, + "mean cost": 200, + "cv cost": 100, + "mean capacity": 120, + "cv capacity": 50 + }, + "arc_DC_B": { + "prob": 0.9, + "mean cost": 300, + "cv cost": 200, + "mean capacity": 110, + "cv capacity": 50 + }, + "inter_region_arc": { + "prob": 0.6, + "mean cost": 200, + "cv cost": 50, + "mean capacity": 40, + "cv capacity": 10 + } + } \ No newline at end of file diff --git a/examples/distr/distr.py b/examples/distr/distr.py new file mode 100644 index 000000000..481dfa6bf --- /dev/null +++ b/examples/distr/distr.py @@ -0,0 +1,227 @@ +# Network Flow - various formulations +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils +import distr_data +import time + +# In this file, we create a (linear) inter-region minimal cost distribution problem. +# Our data, gives the constraints inside each in region in region_dict_creator +# and amongst the different regions in inter_region_dict_creator +# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions + +# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints +# we will impose the inter-region arcs to be consensus-variables. 
They will be represented as nonants in mpi-sppy
+
+
+def inter_arcs_adder(region_dict, inter_region_dict):
+    """This function adds the inter-region arcs to the region_dict
+
+    Args:
+        region_dict (dict): dictionary for the current scenario \n
+        inter_region_dict (dict): dictionary of the inter-region relations
+
+    Returns:
+        local_dict (dict):
+            This dictionary copies region_dict and completes the already existing fields of
+            region_dict to represent the inter-region arcs and their capacities and costs
+    """
+    ### Note: The cost of the arc is here chosen to be split equally between the source region and target region
+
+    # region_dict is renamed as local_dict because it no longer contains only information about the region
+    # but also contains local information that can be linked to the other scenarios (with inter-region arcs)
+    local_dict = region_dict
+    for inter_arc in inter_region_dict["arcs"]:
+        source, target = inter_arc
+        region_source, node_source = source
+        region_target, node_target = target
+
+        if region_source == region_dict["name"] or region_target == region_dict["name"]:
+            arc = node_source, node_target
+            local_dict["arcs"].append(arc)
+            local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc]
+            local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2
+    #print(f"scenario name = {region_dict['name']} \n {local_dict=} ")
+    return local_dict
+
+### Creates the model when local_dict is given
+def min_cost_distr_problem(local_dict, sense=pyo.minimize):
+    """ Create an arcs formulation of network flow
+
+    Args:
+        local_dict (dict): dictionary representing a region including the inter-region arcs \n
+        sense (=pyo.minimize): we aim to minimize the cost; this should always be pyo.minimize
+
+    Returns:
+        model (Pyomo ConcreteModel) : the instantiated model
+    """
+    assert sense == pyo.minimize, "sense should be equal to pyo.minimize"
+    # First, make the special In, Out arc lists for each node
+    arcsout = {n: list() for n in local_dict["nodes"]}
+    arcsin = {n: list() for n in local_dict["nodes"]}
+    for a in local_dict["arcs"]:
+        if a[0] in local_dict["nodes"]:
+            arcsout[a[0]].append(a)
+        if a[1] in local_dict["nodes"]:
+            arcsin[a[1]].append(a)
+
+    model = pyo.ConcreteModel(name='MinCostFlowArcs')
+    def flowBounds_rule(model, i, j):
+        return (0, local_dict["flow capacities"][(i,j)])
+    model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule)  # x
+
+    def slackBounds_rule(model, n):
+        if n in local_dict["factory nodes"]:
+            return (0, local_dict["supply"][n])
+        elif n in local_dict["buyer nodes"]:
+            return (local_dict["supply"][n], 0)
+        elif n in local_dict["distribution center nodes"]:
+            return (0, 0)
+        else:
+            raise ValueError(f"unknown node type for node {n}")
+
+    model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule)
+
+    model.MinCost = pyo.Objective(expr=\
+                    sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \
+                    + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \
+                    + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) ,
+                    sense=sense)
+
+    def FlowBalance_rule(m, n):
+        return sum(m.flow[a] for a in arcsout[n])\
+            - sum(m.flow[a] for a in arcsin[n])\
+            + m.y[n] == local_dict["supply"][n]
+    model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
+
+    return model
+
+
+### Creates the scenario
+def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_params=None,
+                     all_nodes_dict=None):
+    """Creates the model, which should include the consensus variables. \n
+    However, this function should not attach the consensus variables to a root node; that is done in admmWrapper.
+
+    Args:
+        scenario_name (str): the name of the scenario that will be created. Here it is of the shape f"Region{i}" with 1<=i<=num_scens \n
+        inter_region_dict (dict): dictionary of the inter-region relations \n
+        cfg (config object): specifications for the problem \n
+        data_params (dict): parameters from data_params.json, only used when ``cfg.scalable`` \n
+        all_nodes_dict (dict): all the nodes per region, only used when ``cfg.scalable``
+
+    Returns:
+        Pyomo ConcreteModel: the instantiated model
+    """
+    assert (inter_region_dict is not None)
+    assert (cfg is not None)
+    if cfg.scalable:
+        assert (data_params is not None)
+        assert (all_nodes_dict is not None)
+        region_creation_starting_time = time.time()
+        region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params)
+        region_creation_end_time = time.time()
+        print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}")
+    else:
+        region_dict = distr_data.region_dict_creator(scenario_name)
+    # Adding the inter-region arcs and associated features
+    local_dict = inter_arcs_adder(region_dict, inter_region_dict)
+    # Generating the model
+    model = min_cost_distr_problem(local_dict)
+
+    #varlist = list()
+    #sputils.attach_root_node(model, model.MinCost, varlist)
+
+    return model
+
+
+### Functions required in other files; their construction is specific to the problem
+
+def scenario_denouement(rank, scenario_name, scenario):
+    """For each scenario, prints its name and the final variable values
+
+    Args:
+        rank (int): not used here, but could be helpful to know the location
+        scenario_name (str): name of the scenario
+        scenario (Pyomo ConcreteModel): the instantiated model
+    """
+    print(f"flow values for {scenario_name}")
+    scenario.flow.pprint()
+    print(f"slack values for {scenario_name}")
+    scenario.y.pprint()
+
+
+def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names):
+    """Creates the consensus_vars dictionary thanks to the inter-region dictionary. \n
+    This dictionary has redundant information, but is useful for admmWrapper.
+
+    Args:
+        num_scens (int): select the number of scenarios (regions) wanted \n
+        inter_region_dict (dict): dictionary of the inter-region relations \n
+        all_scenario_names (list of str): the subproblem names
+
+    Returns:
+        dict: dictionary whose keys are the regions and whose values are the lists of consensus variables
+        present in the region
+    """
+    consensus_vars = {}
+    for inter_arc in inter_region_dict["arcs"]:
+        source, target = inter_arc
+        region_source, node_source = source
+        region_target, node_target = target
+        arc = node_source, node_target
+        vstr = f"flow[{arc}]"  # variable name as string, y is the slack
+
+        # adds inter-region arcs in the source region
+        if region_source not in consensus_vars:  # initiates consensus_vars[region_source]
+            consensus_vars[region_source] = list()
+        consensus_vars[region_source].append(vstr)
+
+        # adds inter-region arcs in the target region
+        if region_target not in consensus_vars:  # initiates consensus_vars[region_target]
+            consensus_vars[region_target] = list()
+        consensus_vars[region_target].append(vstr)
+    for region in all_scenario_names:
+        if region not in consensus_vars:
+            print(f"WARNING: {region} has no consensus_vars")
+            consensus_vars[region] = list()
+    return consensus_vars
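+
+# Illustration (a comment, not executed): with the hardwired 2-region data,
+# consensus_vars_creator returns
+#     {"Region1": ["flow[('DC1', 'DC2')]"], "Region2": ["flow[('DC1', 'DC2')]"]}
+# i.e. the shared inter-region flow appears in both regions' lists.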
+
+
+def scenario_names_creator(num_scens):
+    """Creates the name of every scenario.
+
+    Args:
+        num_scens (int): number of scenarios
+
+    Returns:
+        list (str): the list of names
+    """
+    return [f"Region{i+1}" for i in range(num_scens)]
+
+
+def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params):
+    """
+    Args:
+        all_nodes_dict (dict): all the nodes per region (scalable example) \n
+        cfg (config): specifications for the problem \n
+        inter_region_dict (dict): dictionary of the inter-region relations \n
+        data_params (dict): parameters from data_params.json (scalable example)
+
+    Returns:
+        dict (str): the keyword arguments that are used in distr.scenario_creator
+    """
+    kwargs = {
+        "all_nodes_dict" : all_nodes_dict,
+        "inter_region_dict" : inter_region_dict,
+        "cfg" : cfg,
+        "data_params" : data_params,
+    }
+    return kwargs
+
+
+def inparser_adder(cfg):
+    # requires the user to give the number of scenarios
+    cfg.num_scens_required()
+
+    cfg.add_to_config("scalable",
+                      description="decides whether a scalable model is used",
+                      domain=bool,
+                      default=False)
+
+    cfg.add_to_config("mnpr",
+                      description="max number of nodes per region and per type",
+                      domain=int,
+                      default=4)
\ No newline at end of file
diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py
new file mode 100644
index 000000000..f145bc620
--- /dev/null
+++ b/examples/distr/distr_admm_cylinders.py
@@ -0,0 +1,166 @@
+# general example driver for distr with cylinders
+import mpisppy.utils.admmWrapper as admmWrapper
+import distr_data
+import distr
+#import distr_no_dummy as distr
+import mpisppy.cylinders
+
+from mpisppy.spin_the_wheel import WheelSpinner
+import mpisppy.utils.sputils as sputils
+from mpisppy.utils import config
+import mpisppy.utils.cfg_vanilla as vanilla
+from mpisppy import MPI
+global_rank = MPI.COMM_WORLD.Get_rank()
+
+import time
+
+write_solution = False
+
+def _parse_args():
+    # create a config object and parse
+    cfg = config.Config()
+    distr.inparser_adder(cfg)
+    cfg.popular_args()
+    cfg.two_sided_args()
+    cfg.ph_args()
+    cfg.aph_args()
+    cfg.xhatxbar_args()
+    cfg.lagrangian_args()
+    cfg.ph_ob_args()
+    cfg.tracking_args()
+    cfg.add_to_config("run_async",
+                      description="Run with async projective hedging instead of progressive hedging",
+                      domain=bool,
+                      default=False)
+
+    cfg.parse_command_line("distr_admm_cylinders")
+    return cfg
+
+
+# This needs to be executed long before the cylinders are created
+def _count_cylinders(cfg):
+    count = 1
+    cfglist = ["xhatxbar", "lagrangian", "ph_ob"]  # all the cfg arguments that create a new cylinder
+    # Add to this list any cfg attribute that would create a spoke
+    for cylname in cfglist:
+        if cfg[cylname]:
+            count += 1
+    return count
+
+
+def main():
+
+    cfg = _parse_args()
+
+    if cfg.default_rho is None: # and rho_setter is None
+        raise RuntimeError("No rho_setter so a default must be specified via --default-rho")
+
+    if cfg.scalable:
+        import json
+        json_file_path = "data_params.json"
+
+        # Read the JSON file
+        with open(json_file_path, 'r') as file:
+            start_time_creating_data = time.time()
+
+            data_params = json.load(file)
+            all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params)
+            all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]]
+            inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params)
+            end_time_creating_data = time.time()
+            creating_data_time = end_time_creating_data - start_time_creating_data
+            print(f"{creating_data_time=}")
+    else:
+        inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens)
+        all_nodes_dict = None
+        data_params = None
+
+    ph_converger = None
+
+    options = {}
+    all_scenario_names = 
distr.scenario_names_creator(num_scens=cfg.num_scens) + scenario_creator = distr.scenario_creator + scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) + n_cylinders = _count_cylinders(cfg) + admm = admmWrapper.AdmmWrapper(options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + ) + + # Things needed for vanilla cylinders + scenario_creator = admm.admmWrapper_scenario_creator ##change needed because of the wrapper + scenario_creator_kwargs = None + scenario_denouement = distr.scenario_denouement + #note that the admmWrapper scenario_creator wrapper doesn't take any arguments + variable_probability = admm.var_prob_list + + beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) + + if cfg.run_async: + # Vanilla APH hub + hub_dict = vanilla.aph_hub(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter = None, + variable_probability=variable_probability) + + else: + # Vanilla PH hub + hub_dict = vanilla.ph_hub(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=None, + variable_probability=variable_probability) + + # FWPH spoke DOES NOT WORK with variable probability + + # Standard Lagrangian bound spoke + if cfg.lagrangian: + lagrangian_spoke = vanilla.lagrangian_spoke(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter = None) + + + # ph outer bounder spoke + if cfg.ph_ob: + ph_ob_spoke = vanilla.ph_ob_spoke(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter = None, + variable_probability=variable_probability) + + # xhat looper bound spoke + if cfg.xhatxbar: + xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + + list_of_spoke_dict = list() + if cfg.lagrangian: + list_of_spoke_dict.append(lagrangian_spoke) + if cfg.ph_ob: + list_of_spoke_dict.append(ph_ob_spoke) + if cfg.xhatxbar: + list_of_spoke_dict.append(xhatxbar_spoke) + + assert n_cylinders == 1 + len(list_of_spoke_dict), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}" + + wheel = WheelSpinner(hub_dict, list_of_spoke_dict) + wheel.spin() + + if write_solution: + wheel.write_first_stage_solution('distr_soln.csv') + wheel.write_first_stage_solution('distr_cyl_nonants.npy', + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) + wheel.write_tree_solution('distr_full_solution') + + if global_rank == 0: + best_objective = wheel.spcomm.BestInnerBound + print(f"{best_objective=}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/distr/distr_data.py b/examples/distr/distr_data.py new file mode 100644 index 000000000..4aa58061c --- /dev/null +++ b/examples/distr/distr_data.py @@ -0,0 +1,403 @@ +# Our data, gives the constraints inside each in region in region_dict_creator +# and amongst the different regions in inter_region_dict_creator +# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions + +### This file creates the data through the region_dict and inter_region_dict +# First there is a hard wired data_set, then there is a scalable dataset + +# Hardwired data sets + +def inter_region_dict_creator(num_scens): + 
"""Creates the oriented arcs between the regions, with their capacities and costs. \n + This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: + Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n + The arcs are used as keys for the dictionaries of costs and capacities + """ + inter_region_dict={} + + if num_scens == 2: + inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] + inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} + inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + + elif num_scens == 3: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + } + + elif num_scens == 4: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ + (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ + (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ + (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ + } + + return inter_region_dict + + +def region_dict_creator(scenario_name): + """ Create a scenario for the inter-region max profit distribution example. + + The convention for node names is: + Symbol + number of the region (+ _ + number of the example if needed), + with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n + For instance: F3_1 is the 1st factory node of region 3. 
\n + + Args: + scenario_name (str): + Name of the scenario to construct. + + Returns: + region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n + "nodes" (list of str): all the nodes. The following subsets are also nodes: + "factory nodes", "buyer nodes", "distribution center nodes", \n + "arcs" (list of 2 tuples of str) : (node, node) pairs\n + "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n + "production costs" (dict of float): at each factory node\n + "revenues" (dict of float): at each buyer node \n + "flow costs" (dict[a] of float) : costs per unit flow on each arc \n + "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n + """ + def _is_partition(L, *lists): + # Step 1: Verify that the union of all sublists contains all elements of L + if set(L) != set().union(*lists): + return False + + # Step 2: Ensure each element in L appears in exactly one sublist + for item in L: + count = 0 + for sublist in lists: + if item in sublist: + count += 1 + if count != 1: + return False + + return True + if scenario_name == "Region1" : + # Creates data for Region1 + region_dict={"name": "Region1"} + region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] + region_dict["factory nodes"] = ["F1_1","F1_2"] + region_dict["buyer nodes"] = ["B1_1","B1_2"] + region_dict["distribution center nodes"]= ["DC1"] + region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} + region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), + ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + + region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} + region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} + # most capacities are 50, so start there and then override + region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} + region_dict["flow capacities"][("F1_1","B1_1")] = None + region_dict["flow capacities"][("F1_2","B1_2")] = None + region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, + ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} + + elif scenario_name=="Region2": + # Creates data for Region2 + region_dict={"name": "Region2"} + region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] + region_dict["factory nodes"] = list() + region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] + region_dict["distribution center nodes"]= ["DC2"] + region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} + region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + + region_dict["production costs"] = {} + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} + region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} + region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} + + elif scenario_name == "Region3" : + # Creates data for Region3 + region_dict={"name": "Region3"} + region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] + region_dict["factory nodes"] = ["F3_1","F3_2"] + region_dict["buyer nodes"] = ["B3_1","B3_2"] + region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] + region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} + region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), + ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + + region_dict["production costs"] = {"F3_1": 50, "F3_2": 
50} + region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} + region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, + ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} + region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, + ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} + + elif scenario_name == "Region4": + # Creates data for Region4 + region_dict={"name": "Region4"} + region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] + region_dict["factory nodes"] = ["F4_1","F4_2"] + region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["distribution center nodes"] = ["DC4"] + region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} + region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + + region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} + region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} + region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} + region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} + + else: + raise RuntimeError (f"unknown Region name {scenario_name}") + + assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + + return region_dict + +import json +if __name__ == "__main__": + #creating the json files + for num_scens in range(2,5): + inter_region_dict_path = f'json_dicts.inter_region_dict{num_scens}.json' + data = inter_region_dict_creator(num_scens) + + # Write the data to the JSON file + with open(inter_region_dict_path, 'w') as json_file: + json.dump(data, json_file, indent=4) + + for i in range(1,5): + region_dict_path = f'json_dicts.region{i}_dict.json' + data = region_dict_creator(f"Region{i}") + + # Write the data to the JSON file + with open(region_dict_path, 'w') as json_file: + json.dump(data, json_file, indent=4) + +######################################################################################################################## +# Scalable datasets + +import re +import numpy as np + +def parse_node_name(name): + """ decomposes the name, for example "DC1_2 gives" "DC",1,2 + Args: + name (str): name of the node + + Returns: + triplet (str, int, int): type of node ("DC", "F" or "B"), number of the region, and number of the node + """ + # Define the regular expression pattern + pattern = r'^([A-Za-z]+)(\d+)(?:_(\d+))?$' + + # Match the pattern against the provided name + match = re.match(pattern, name) + + if match: + # Extract the prefix, the first number, and the second number (if present) + prefix = match.group(1) + first_number = match.group(2) + second_number = match.group(3) if match.group(3) is not None else '1' + assert prefix in ["DC", "F", "B"] + return prefix, int(first_number), int(second_number) + else: + raise RuntimeError (f"the node {name} can't be well decomposed") + + +def _node_num(max_node_per_region, node_type, region_num, count): + """ + Args: + max_node_per_region (int): maximum number of node per region per type + + Returns: + int: a number specific to the node. 
This allows us to have unrelated seeds
+    """
+    node_types = ["DC", "F", "B"]  # no need to include the dummy nodes as they are added automatically
+    return (max_node_per_region * region_num + count) * len(node_types) + node_types.index(node_type)
+
+def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True):  # in a Region
+    """decides pseudo-randomly whether an arc will be created based on the two nodes
+
+    Args:
+        node_1 (str): name of the source node
+        node_2 (str): name of the target node
+        prob (float): probability that the arc is created
+        cfg (pyomo config): the config arguments
+        intra (bool, optional): True if the arc is inside a region, False if the arc is between different regions. Defaults to True.
+
+    Returns:
+        bool: True if the arc is selected for creation, False otherwise
+    """
+    max_node_per_region = cfg.mnpr
+    node_type1, region_num1, count1 = parse_node_name(node_1)
+    node_type2, region_num2, count2 = parse_node_name(node_2)
+    node_num1 = _node_num(max_node_per_region, node_type1, region_num1, count1)
+    node_num2 = _node_num(max_node_per_region, node_type2, region_num2, count2)
+    if intra:
+        assert region_num1 == region_num2, f"supposed to happen in a region ({intra=}), but {region_num1, region_num2=}"
+    else:
+        if region_num1 == region_num2: # inside a region, however intra=False so no connection
+            return False
+    max_node_num = _node_num(max_node_per_region, "B", cfg.num_scens+1, max_node_per_region+1) # maximum number possible
+    np.random.seed(min(node_num1, node_num2) * max_node_num + max(node_num1, node_num2)) # it is symmetrical
+    random_number = np.random.rand()
+    # Determine if the event occurs
+    boo = random_number < prob
+    return boo
+
+
+def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, intra=True):
+    """if the arc is pseudo-randomly chosen to be constructed, it is added to the dictionary with its cost and capacity
+
+    Args:
+        my_dict (dict): either a region_dict if intra=True, otherwise the inter_region_dict
+        node_1 (str): name of the source node
+        node_2 (str): name of the target node
+        cfg (pyomo config): the config arguments
+        arc: the arc key to add; for inter-region arcs a pair ((region_source, node_source), (region_target, node_target)),
+            for intra-region arcs simply (node_source, node_target)
+        arc_params (dict): parameters for the random cost and capacity
+        my_seed (int): unique number used as seed
+        intra (bool, optional): True if the arc is inside a region, False if the arc is between different regions. Defaults to True.
+ """ + prob = arc_params["prob"] + mean_cost = arc_params["mean cost"] + cv_cost = arc_params["cv cost"] + mean_capacity = arc_params["mean capacity"] + cv_capacity = arc_params["cv capacity"] + if _pseudo_random_arc(node_1, node_2, prob, cfg, intra=intra): + my_dict["arcs"].append(arc) + np.random.seed(my_seed % 2**32) + if intra: # not the same names used + cost_name = "flow costs" + capacity_name = "flow capacities" + else: + cost_name = "costs" + capacity_name = "capacities" + my_dict[cost_name][arc] = max(np.random.normal(mean_cost,cv_cost),0) + np.random.seed((2**31+my_seed) % 2**32) + my_dict[capacity_name][arc] = max(np.random.normal(mean_capacity,cv_capacity),0) + + +def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params): # same as inter_region_dict_creator but the scalable version + inter_region_arc_params = data_params["inter_region_arc"] + inter_region_dict={} + inter_region_dict["arcs"] = list() + inter_region_dict["costs"] = {} + inter_region_dict["capacities"] = {} + count = 0 + for node_1 in all_DC_nodes: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't + for node_2 in all_DC_nodes: + _, region_num1, _ = parse_node_name(node_1) + source = f"Region{region_num1}", node_1 + _, region_num2, _ = parse_node_name(node_2) + target = f"Region{region_num2}", node_2 + arc = source, target + _intra_arc_creator(inter_region_dict, node_1, node_2, cfg, arc, inter_region_arc_params, count, intra=False) + count += 1 + return inter_region_dict + + +def all_nodes_dict_creator(cfg, data_params): + """ + Args: + cfg (pyomo config): configuration arguments + data_params (nested dict): allows to construct the random probabilities + + Returns: + (dict of str): the keys are regions containing all their nodes. 
+ """ + all_nodes_dict = {} + num_scens = cfg.num_scens + max_node_per_region = cfg.mnpr # maximum node node of a certain type in any region + all_nodes_dict = {} + production_costs_mean = data_params["production costs mean"] + production_costs_cv = data_params["production costs cv"] #coefficient of variation + revenues_mean = data_params["revenues mean"] + revenues_cv = data_params["revenues cv"] + supply_factory_mean = data_params["supply factory mean"] + supply_factory_cv = data_params["supply factory cv"] + supply_buyer_mean = data_params["supply buyer mean"] + supply_buyer_cv = data_params["supply buyer cv"] + for i in range(1, num_scens+1): + region_name = f"Region{i}" + all_nodes_dict[region_name] = {} + node_types = ["DC", "F", "B"] + all_nodes_dict[region_name]["nodes"] = [] + association_types = {"DC": "distribution center nodes", "F": "factory nodes", "B": "buyer nodes"} + all_nodes_dict[region_name]["production costs"] = {} + all_nodes_dict[region_name]["revenues"] = {} + all_nodes_dict[region_name]["supply"] = {} + for node_type in node_types: + node_base_num = _node_num(max_node_per_region, node_type, i, 1) #the first node that will be created will have this number + # That helps us to have a seed, thanks to that we choose an integer which will be the number of nodes of this type + np.random.seed(node_base_num) + m = np.random.randint(0, max_node_per_region) + all_nodes_dict[region_name][association_types[node_type]] = [node_type + str(i) + "_" +str(j) for j in range(1, m+1)] + all_nodes_dict[region_name]["nodes"] += all_nodes_dict[region_name][association_types[node_type]] + if node_type == "F": + count = 1 + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) + all_nodes_dict[region_name]["production costs"][node_name] = max(0,np.random.normal(production_costs_mean,production_costs_cv)) + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) + all_nodes_dict[region_name]["supply"][node_name] = max(0,np.random.normal(supply_factory_mean,supply_factory_cv)) #positive + count += 1 + if node_type == "B": + count = 1 + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) + all_nodes_dict[region_name]["revenues"][node_name] = max(0, np.random.normal(revenues_mean,revenues_cv)) + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) + all_nodes_dict[region_name]["supply"][node_name] = - max(0, np.random.normal(supply_buyer_mean,supply_buyer_cv)) #negative + count += 1 + if node_type == "DC": + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + all_nodes_dict[region_name]["supply"][node_name] = 0 + return all_nodes_dict + + +def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, data_params=None): # same as region_dict_creator but the scalable version + assert all_nodes_dict is not None + assert cfg is not None + assert data_params is not None + local_nodes_dict = all_nodes_dict[scenario_name] + region_dict = local_nodes_dict + region_dict["name"] = scenario_name + region_dict["arcs"] = list() + region_dict["flow costs"] = {} + region_dict["flow capacities"] = {} + count = 2**30 # to have unrelated data with the production_costs + for node_1 in local_nodes_dict["nodes"]: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't + for node_2 
in local_nodes_dict["nodes"]: + node_type1, _, _ = parse_node_name(node_1) + node_type2, _, _ = parse_node_name(node_2) + arcs_association = {("F","DC") : data_params["arc_F_DC"], ("DC", "B") : data_params["arc_DC_B"], ("F", "B") : data_params["arc_F_B"], ("DC", "DC"): data_params["arc_DC_DC"]} + arc_type = (node_type1, node_type2) + if arc_type in arcs_association: + arc_params = arcs_association[arc_type] + arc = (node_1, node_2) + _intra_arc_creator(region_dict, node_1, node_2, cfg, arc, arc_params, my_seed=count, intra=True) + count += 1 + return region_dict \ No newline at end of file diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py new file mode 100644 index 000000000..a07b8d930 --- /dev/null +++ b/examples/distr/distr_ef.py @@ -0,0 +1,97 @@ +# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff +# This software is distributed under the 3-clause BSD License. +# general example driver for distr with cylinders + +# This file can be executed thanks to python distr_ef.py --num-scens 2 --solver-name cplex_direct + +import mpisppy.utils.admmWrapper as admmWrapper +import distr +import distr_data +import mpisppy.cylinders +import pyomo.environ as pyo + +import mpisppy.utils.sputils as sputils +from mpisppy.utils import config +from mpisppy import MPI +global_rank = MPI.COMM_WORLD.Get_rank() + + +write_solution = True + +def _parse_args(): + # create a config object and parse + cfg = config.Config() + distr.inparser_adder(cfg) + cfg.add_to_config("solver_name", + description="Choice of the solver", + domain=str, + default=None) + + cfg.parse_command_line("distr_ef2") + return cfg + + +def solve_EF_directly(admm,solver_name): + scenario_names = admm.local_scenario_names + scenario_creator = admm.admmWrapper_scenario_creator + + ef = sputils.create_EF( + scenario_names, + scenario_creator, + nonant_for_fixed_vars=False, + ) + solver = pyo.SolverFactory(solver_name) + if 'persistent' in solver_name: + solver.set_instance(ef, symbolic_solver_labels=True) + solver.solve(tee=True) + else: + solver.solve(ef, tee=True, symbolic_solver_labels=True,) + return ef + +def main(): + + cfg = _parse_args() + if cfg.scalable: + import json + json_file_path = "data_params.json" + + # Read the JSON file + with open(json_file_path, 'r') as file: + data_params = json.load(file) + all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) + all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] + inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + else: + inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens) + all_nodes_dict = None + data_params = None + + options = {} + all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) + scenario_creator = distr.scenario_creator + scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) + + n_cylinders = 1 + admm = admmWrapper.AdmmWrapper(options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + ) + + solved_ef = solve_EF_directly(admm, cfg.solver_name) + with open("ef.txt", "w") as f: + solved_ef.pprint(f) + print ("******* model written to ef.txt *******") + solution_file_name = "solution_distr.txt" 
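+    # write_ef_first_stage_solution saves the first-stage (nonanticipative)
+    # variable values of the solved EF to solution_file_name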
+ sputils.write_ef_first_stage_solution(solved_ef, + solution_file_name,) + print(f"ef solution written to {solution_file_name}") + print(f"EF objective: {pyo.value(solved_ef.EF_Obj)}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py new file mode 100644 index 000000000..973afad26 --- /dev/null +++ b/examples/distr/globalmodel.py @@ -0,0 +1,93 @@ +# Distribution example without decomposition, this file is only written to execute the non-scalable example + +### This code line can execute the script for a certain example +# python globalmodel.py --solver-name cplex_direct --num-scens 3 + + +import pyomo.environ as pyo +import distr +import distr_data +from mpisppy.utils import config + +def _parse_args(): + # create a config object and parse + cfg = config.Config() + distr.inparser_adder(cfg) + cfg.add_to_config("solver_name", + description="which solver", + domain=str, + default=None, + argparse_args = {"required": True}, + ) + cfg.parse_command_line("globalmodel") + + return cfg + + +def global_dict_creator(num_scens, start=0): + """Merges the different region_dict thanks to the inter_region_dict in distr to create a global dictionary + + Args: + num_scens (int): number of regions wanted + start (int, optional): unuseful here. Defaults to 0. + + Returns: + dict: dictionary with the information to create a min cost distribution problem + """ + inter_region_dict = distr_data.inter_region_dict_creator(num_scens) + global_dict={} + for i in range(start,start+num_scens): + scenario_name=f"Region{i+1}" + region_dict = distr_data.region_dict_creator(scenario_name) + for key in region_dict: + if key in ["nodes","factory nodes", "buyer nodes", "distribution center nodes", "arcs"]: + if i == start: + global_dict[key]=[] + for x in region_dict[key]: + global_dict[key].append(x) + if key in ["supply", "production costs","revenues", "flow capacities", "flow costs"]: + if i == start: + global_dict[key]={} + for key2 in region_dict[key]: + global_dict[key][key2] = region_dict[key][key2] + + def _extract_arc(a): + source, target = a + node_source = source[1] + node_target = target[1] + return (node_source, node_target) + + for a in inter_region_dict["arcs"]: + global_dict["arcs"].append(_extract_arc(a)) + for a in inter_region_dict["costs"]: + global_dict["flow costs"][_extract_arc(a)] = inter_region_dict["costs"][a] + for a in inter_region_dict["capacities"]: + global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict["capacities"][a] + return global_dict + +def main(): + """ + do all the work + """ + cfg = _parse_args() + assert cfg.scalable is False, "the global model example has not been adapted for the scalable example" + model = distr.min_cost_distr_problem(global_dict_creator(num_scens=cfg.num_scens)) + + solver_name = cfg.solver_name + opt = pyo.SolverFactory(solver_name) + results = opt.solve(model) # solves and updates model + pyo.assert_optimal_termination(results) + model.pprint() + + # Grabs the objective function + objectives = model.component_objects(pyo.Objective, active=True) + count = 0 + for obj in objectives: + objective_value = pyo.value(obj) + count += 1 + assert count == 1, f"only one objective function is authorized, there are {count}" + print(f"Objective '{obj}' value: {objective_value}") + + +if __name__ == "__main__": + main() diff --git a/examples/distr/go.bash b/examples/distr/go.bash new file mode 100644 index 000000000..9bdcd04fb --- /dev/null +++ 
b/examples/distr/go.bash @@ -0,0 +1,8 @@ +#!/bin/bash + +# How to run this bash script: +# bash go.bash +# (the script takes no arguments; edit the command line below to change the example) + +# This file runs one distr_admm_cylinders.py example +mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 4 --rel-gap 0.05 diff --git a/examples/hydro/demo.bash b/examples/hydro/demo.bash index e0df5f989..05880482a 100644 --- a/examples/hydro/demo.bash +++ b/examples/hydro/demo.bash @@ -2,7 +2,7 @@ SOLVERNAME=cplex -mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --with-xhatshuffle --with-lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME} +mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --xhatshuffle --lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME} # including this option will cause the upper bounder to solve the EF since there are only three ranks in total. #--stage2EFsolvern=${SOLVERNAME} diff --git a/examples/hydro/hydro.py b/examples/hydro/hydro.py index 77b7d146d..d439d35ab 100644 --- a/examples/hydro/hydro.py +++ b/examples/hydro/hydro.py @@ -184,7 +184,8 @@ def MakeNodesforScen(model, BFs, scennum): Args: BFs (list of int): branching factors """ - ndn = "ROOT_"+str((scennum-1) // BFs[0]) # scennum is one-based + # In general, divide by the product of the branching factors that come after the node (here prod(BFs[1:]) = BFs[1]) + ndn = "ROOT_"+str((scennum-1) // BFs[1]) # scennum is one-based retval = [scenario_tree.ScenarioNode("ROOT", 1.0, 1, @@ -228,6 +229,7 @@ def scenario_creator(scenario_name, branching_factors=None, data_path=None): instance = model.create_instance(fname, name=scenario_name) instance._mpisppy_node_list = MakeNodesforScen(instance, branching_factors, snum) + instance._mpisppy_probability = "uniform" return instance #============================================================================= diff --git a/examples/hydro/hydro_cylinders.py b/examples/hydro/hydro_cylinders.py index 12e94ce09..5bc06776e 100644 --- a/examples/hydro/hydro_cylinders.py +++ b/examples/hydro/hydro_cylinders.py @@ -92,6 +92,7 @@ def main(): list_of_spoke_dict.append(xhatshuffle_spoke) if cfg.stage2EFsolvern is not None: + assert xhatshuffle is not None, "xhatshuffle is required for stage2EFsolvern" xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"] xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"] diff --git a/examples/run_all.py b/examples/run_all.py index bf1a292dd..43977c916 100644 --- a/examples/run_all.py +++ b/examples/run_all.py @@ -337,7 +337,6 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): # sizes kills the github tests using xpress # so we use linearized proximal terms -do_one("sizes", "sizes_demo.py", 1, " {}".format(solver_name)) do_one("sizes", "special_cylinders.py", @@ -399,6 +398,8 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): "--lagrangian-iter0-mipgap=1e-7 --cross-scenario-cuts " "--ph-mipgaps-json=phmipgaps.json --cross-scenario-iter-cnt=4 " "--solver-name={}".format(solver_name)) + # this one takes a long time, so I moved it into the uc section + do_one("sizes", "sizes_demo.py", 1, "
{}".format(solver_name)) if len(badguys) > 0: print("\nBad Guys:") diff --git a/mpisppy/cylinders/lagrangian_bounder.py b/mpisppy/cylinders/lagrangian_bounder.py index 30ebfebd9..6766d95e9 100644 --- a/mpisppy/cylinders/lagrangian_bounder.py +++ b/mpisppy/cylinders/lagrangian_bounder.py @@ -58,6 +58,7 @@ def main(self): if hasattr(self.opt, 'rho_setter'): rho_setter = self.opt.rho_setter extensions = self.opt.extensions is not None + verbose = self.opt.options['verbose'] self.lagrangian_prep() @@ -86,3 +87,10 @@ def main(self): if extensions: self.opt.extobject.enditer_after_sync() self.dk_iter += 1 + elif self.opt.options.get("subgradient_while_waiting", False): + # compute a subgradient step + self.opt.Compute_Xbar(verbose) + self.opt.Update_W(verbose) + bound = self.lagrangian() + if bound is not None: + self.bound = bound diff --git a/mpisppy/fwph/fwph.py b/mpisppy/fwph/fwph.py index cf29fbd75..08c5236bc 100644 --- a/mpisppy/fwph/fwph.py +++ b/mpisppy/fwph/fwph.py @@ -64,6 +64,7 @@ def __init__( scenario_creator_kwargs=None, ph_converger=None, rho_setter=None, + variable_probability=None, ): super().__init__( PH_options, @@ -77,8 +78,8 @@ def __init__( extension_kwargs=None, ph_converger=ph_converger, rho_setter=rho_setter, - ) - + ) + assert (variable_probability == None), "variable probability is not allowed with fwph" self._init(FW_options) def _init(self, FW_options): diff --git a/mpisppy/spopt.py b/mpisppy/spopt.py index f6c282920..49297fb2d 100644 --- a/mpisppy/spopt.py +++ b/mpisppy/spopt.py @@ -896,8 +896,10 @@ def _create_solvers(self, presolve=True): root=0) if self.cylinder_rank == 0: asit = [sit for l_sit in all_set_instance_times for sit in l_sit] - print("Set instance times:") - print("\tmin=%4.2f mean=%4.2f max=%4.2f" % + if len(asit) == 0: + print("Set instance times not available.") + else: + print("Set instance times: \tmin=%4.2f mean=%4.2f max=%4.2f" % (np.min(asit), np.mean(asit), np.max(asit))) diff --git a/mpisppy/tests/examples/distr.py b/mpisppy/tests/examples/distr.py new file mode 100644 index 000000000..699d50ba0 --- /dev/null +++ b/mpisppy/tests/examples/distr.py @@ -0,0 +1,403 @@ +# This file is used in the tests and should not be modified! + +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils + +# In this file, we create a (linear) inter-region minimal cost distribution problem. +# Our data, gives the constraints inside each in region in region_dict_creator +# and amongst the different regions in inter_region_dict_creator +# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions + +# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints +# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables +# The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars + +### The following functions create the data + +def inter_region_dict_creator(num_scens): + """Creates the oriented arcs between the regions, with their capacities and costs. \n + This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes. 
+ + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: + Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n + The arcs are used as keys for the dictionaries of costs and capacities + """ + inter_region_dict={} + + if num_scens == 2: + inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] + inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} + inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + + elif num_scens == 3: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + } + + elif num_scens == 4: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ + (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ + (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ + (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ + } + + return inter_region_dict + + +def dummy_nodes_generator(region_dict, inter_region_dict): + """This function creates a new dictionary ``local_dict`` similar to ``region_dict`` with the dummy nodes and their constraints + + Args: + region_dict (dict): dictionary for the current scenario \n + inter_region_dict (dict): dictionary of the inter-region relations + + Returns: + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of + region_dict to represent the dummy nodes, and adds the following keys:\n + dummy nodes source (resp.
target): the list of dummy nodes for which the source (resp. target) + is in the considered region. \n + dummy nodes source (resp. target) slack bounds: dictionary whose keys are the dummy source (resp. target) nodes and whose values are their slack bounds + """ + ### Note: The cost of the arc is here chosen to be split equally between the source region and target region + + # region_dict is renamed as local_dict because it no longer contains only information about the region + # but also contains local information that can be linked to the other scenarios (with dummy nodes) + local_dict = region_dict + local_dict["dummy nodes source"] = list() + local_dict["dummy nodes target"] = list() + local_dict["dummy nodes source slack bounds"] = {} + local_dict["dummy nodes target slack bounds"] = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + + if region_source == region_dict["name"]: + dummy_node=node_source+node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes source"].append(dummy_node) + local_dict["arcs"].append((node_source, dummy_node)) + local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + + if region_target == local_dict["name"]: + dummy_node = node_source + node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes target"].append(dummy_node) + local_dict["arcs"].append((dummy_node, node_target)) + local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + return local_dict + +def _is_partition(L, *lists): + # Step 1: Verify that the union of all sublists contains all elements of L + if set(L) != set().union(*lists): + return False + + # Step 2: Ensure each element in L appears in exactly one sublist + for item in L: + count = 0 + for sublist in lists: + if item in sublist: + count += 1 + if count != 1: + return False + + return True + +def region_dict_creator(scenario_name): + """ Create a scenario for the inter-region min cost distribution example. + + The convention for node names is: + Symbol + number of the region (+ _ + index of the node if needed), \n + with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n + For instance: F3_1 is the 1st factory node of region 3. \n + + Args: + scenario_name (str): + Name of the scenario to construct. + + Returns: + region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n + "nodes" (list of str): all the nodes.
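+ For instance, the nodes of Region1 are F1_1, F1_2, DC1, B1_1 and B1_2.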
The following subsets are also nodes: \n + "factory nodes", "buyer nodes", "distribution center nodes", \n + "arcs" (list of 2 tuples of str) : (node, node) pairs\n + "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n + "production costs" (dict of float): at each factory node\n + "revenues" (dict of float): at each buyer node \n + "flow costs" (dict[a] of float) : costs per unit flow on each arc \n + "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n + """ + if scenario_name == "Region1" : + # Creates data for Region1 + region_dict={"name": "Region1"} + region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] + region_dict["factory nodes"] = ["F1_1","F1_2"] + region_dict["buyer nodes"] = ["B1_1","B1_2"] + region_dict["distribution center nodes"]= ["DC1"] + region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} + region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), + ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + + region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} + region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} + # most capacities are 50, so start there and then override + region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} + region_dict["flow capacities"][("F1_1","B1_1")] = None + region_dict["flow capacities"][("F1_2","B1_2")] = None + region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, + ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} + + elif scenario_name=="Region2": + # Creates data for Region2 + region_dict={"name": "Region2"} + region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] + region_dict["factory nodes"] = list() + region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] + region_dict["distribution center nodes"]= ["DC2"] + region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} + region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + + region_dict["production costs"] = {} + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} + region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} + region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} + + elif scenario_name == "Region3" : + # Creates data for Region3 + region_dict={"name": "Region3"} + region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] + region_dict["factory nodes"] = ["F3_1","F3_2"] + region_dict["buyer nodes"] = ["B3_1","B3_2"] + region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] + region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} + region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), + ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + + region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} + region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} + region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, + ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} + region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, + ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} + + elif scenario_name == "Region4": + # Creates data for Region4 + region_dict={"name": "Region4"} + region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] + region_dict["factory nodes"] = 
["F4_1","F4_2"] + region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["distribution center nodes"] = ["DC4"] + region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} + region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + + region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} + region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} + region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} + region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} + + else: + raise RuntimeError (f"unknown Region name {scenario_name}") + + assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + + return region_dict + + +###Creates the model when local_dict is given +def min_cost_distr_problem(local_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow + + Args: + local_dict (dict): dictionary representing a region including the dummy nodes \n + sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in local_dict["nodes"]} + arcsin = {n: list() for n in local_dict["nodes"]} + for a in local_dict["arcs"]: + arcsout[a[0]].append(a) + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + def flowBounds_rule(model, i,j): + return (0, local_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x + + def slackBounds_rule(model, n): + if n in local_dict["factory nodes"]: + return (0, local_dict["supply"][n]) + elif n in local_dict["buyer nodes"]: + return (local_dict["supply"][n], 0) + elif n in local_dict["dummy nodes source"]: + return (0,local_dict["dummy nodes source slack bounds"][n]) + elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule + return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ + + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ + + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal + if n in local_dict["dummy nodes target"]: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + - m.y[n] == local_dict["supply"][n] + else: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == local_dict["supply"][n] + model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + + return model + + +###Creates the scenario +def scenario_creator(scenario_name, num_scens=None): + """Creates the model, which should include the consensus 
variables. \n + However, this function should not attach the consensus variables to the root node; that is done in admmWrapper. + + Args: + scenario_name (str): the name of the scenario that will be created. Here it is of the form f"Region{i}" with 1<=i<=num_scens \n + num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary + + Returns: + Pyomo ConcreteModel: the instantiated model + """ + assert (num_scens is not None) + inter_region_dict = inter_region_dict_creator(num_scens) + region_dict = region_dict_creator(scenario_name) + # Adding dummy nodes and associated features + local_dict = dummy_nodes_generator(region_dict, inter_region_dict) + # Generating the model + model = min_cost_distr_problem(local_dict) + + varlist = list() + sputils.attach_root_node(model, model.MinCost, varlist) + + return model + + +###Functions required in other files; their constructions are specific to the problem + +def scenario_denouement(rank, scenario_name, scenario): + """for each scenario, prints its name and the final variable values + + Args: + rank (int): not used here, but could be helpful to know the location + scenario_name (str): name of the scenario + scenario (Pyomo ConcreteModel): the instantiated model + """ + print(f"flow values for {scenario_name}") + scenario.flow.pprint() + print(f"slack values for {scenario_name}") + scenario.y.pprint() + + +def consensus_vars_creator(num_scens): + """The following function creates the consensus_vars dictionary from the inter-region dictionary. \n + This dictionary has redundant information, but is useful for admmWrapper. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: dictionary whose keys are the regions and values are the list of consensus variables + present in the region + """ + # Due to the small size of inter_region_dict, it is not given as argument but rather created. + inter_region_dict = inter_region_dict_creator(num_scens) + consensus_vars = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + dummy_node = node_source + node_target + vstr = f"y[{dummy_node}]" #variable name as string, y is the slack + + #adds dummy_node in the source region + if region_source not in consensus_vars: # initializes consensus_vars[region_source] + consensus_vars[region_source] = list() + consensus_vars[region_source].append(vstr) + + #adds dummy_node in the target region + if region_target not in consensus_vars: # initializes consensus_vars[region_target] + consensus_vars[region_target] = list() + consensus_vars[region_target].append(vstr) + return consensus_vars + + +def scenario_names_creator(num_scens): + """Creates the name of every scenario. + + Args: + num_scens (int): number of scenarios + + Returns: + list (str): the list of names + """ + return [f"Region{i+1}" for i in range(num_scens)] + + +def kw_creator(cfg): + """ + Args: + cfg (config): specifications for the problem.
We only look at the number of scenarios + + Returns: + dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} + """ + kwargs = {"num_scens" : cfg.get('num_scens', None), + } + if kwargs["num_scens"] not in [2, 3, 4]: + raise RuntimeError(f"unexpected number of regions {cfg.num_scens}, should be in [2, 3, 4]") + return kwargs + + +def inparser_adder(cfg): + #requires the user to give the number of scenarios + cfg.num_scens_required() \ No newline at end of file diff --git a/mpisppy/tests/test_admmWrapper.py b/mpisppy/tests/test_admmWrapper.py new file mode 100644 index 000000000..426c2fa9e --- /dev/null +++ b/mpisppy/tests/test_admmWrapper.py @@ -0,0 +1,75 @@ +import unittest +import mpisppy.utils.admmWrapper as admmWrapper +import examples.distr as distr +from mpisppy.utils import config +from mpisppy import MPI + +class TestAdmmWrapper(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def _cfg_creator(self, num_scens): + cfg = config.Config() + + cfg.num_scens_required() + cfg.num_scens = num_scens + return cfg + + + def _make_admm(self, num_scens, verbose=False): + cfg = self._cfg_creator(num_scens) + options = {} + all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) + scenario_creator = distr.scenario_creator + scenario_creator_kwargs = distr.kw_creator(cfg) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens) + n_cylinders = 1 #distr_admm_cylinders._count_cylinders(cfg) + return admmWrapper.AdmmWrapper(options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + verbose=verbose, + ) + + def test_constructor(self): + self._make_admm(2, verbose=True) + for i in range(3,5): + self._make_admm(i) + + def test_variable_probability(self): + admm = self._make_admm(3) + q = dict() + for sname, s in admm.local_scenarios.items(): + q[sname] = admm.var_prob_list(s) + self.assertEqual(q["Region1"][0][1], 0.5) + self.assertEqual(q["Region3"][0][1], 0) + + def test_admmWrapper_scenario_creator(self): + admm = self._make_admm(3) + sname = "Region3" + q = admm.admmWrapper_scenario_creator(sname) + self.assertTrue(q.y__DC1DC2__.is_fixed()) + self.assertFalse(q.y["DC3_1DC1"].is_fixed()) + + def _slack_name(self, dummy_node): + return f"y[{dummy_node}]" + + def test_assign_variable_probs_error1(self): + admm = self._make_admm(3) + admm.consensus_vars["Region1"].append(self._slack_name("DC2DC3")) + self.assertRaises(RuntimeError, admm.assign_variable_probs) + + def test_assign_variable_probs_error2(self): + admm = self._make_admm(3) + admm.consensus_vars["Region1"].remove(self._slack_name("DC3_1DC1")) + self.assertRaises(RuntimeError, admm.assign_variable_probs) + + +if __name__ == '__main__': + unittest.main() diff --git a/mpisppy/utils/admmWrapper.py b/mpisppy/utils/admmWrapper.py new file mode 100644 index 000000000..6795a7fb6 --- /dev/null +++ b/mpisppy/utils/admmWrapper.py @@ -0,0 +1,162 @@ +#creating the class AdmmWrapper +import mpisppy.utils.sputils as sputils +import pyomo.environ as pyo +import mpisppy +from collections import OrderedDict +from mpisppy import MPI +global_rank = MPI.COMM_WORLD.Get_rank() + +def _consensus_vars_number_creator(consensus_vars): + """associates to each consensus variable the number of times it appears + + Args: + consensus_vars (dict): dictionary whose keys are the subproblems and values are the list of consensus variables + present in the subproblem + + Returns:
consensus_vars_number (dict): dictionary whose keys are the consensus variables + and values are the number of subproblems the variable is linked to. + """ + consensus_vars_number={} + for subproblem in consensus_vars: + for var in consensus_vars[subproblem]: + if var not in consensus_vars_number: # instantiates consensus_vars_number[var] + consensus_vars_number[var] = 0 + consensus_vars_number[var] += 1 + for var in consensus_vars_number: + if consensus_vars_number[var] == 1: + print(f"The consensus variable {var} appears in a single subproblem") + return consensus_vars_number + +class AdmmWrapper(): + """ This class assigns variable probabilities and creates a wrapper for the scenario creator + + Args: + options (dict): options + all_scenario_names (list): all scenario names + scenario_creator (fct): returns a concrete model; supplied by the user/modeler + consensus_vars (dict): dictionary whose keys are the subproblems and values are the list of consensus variables + present in the subproblem + n_cylinders (int): number of cylinders that will ultimately be used + mpicomm (MPI comm): the MPI communicator + scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. + verbose (boolean): if True gives extra debugging information + + Attributes: + local_scenarios (dict of scenario objects): concrete models with + extra data, key is name + local_scenario_names (list): names of locals + """ + def __init__(self, + options, + all_scenario_names, + scenario_creator, # supplied by the user/modeler, used only here + consensus_vars, + n_cylinders, + mpicomm, + scenario_creator_kwargs=None, + verbose=None, + ): + assert len(options) == 0, "no options supported by AdmmWrapper" + # We need local_scenarios + self.local_scenarios = {} + scenario_tree = sputils._ScenTree(["ROOT"], all_scenario_names) + assert mpicomm.Get_size() % n_cylinders == 0, \ + f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" + ranks_per_cylinder = mpicomm.Get_size() // n_cylinders + + scenario_names_to_rank, _rank_slices, _scenario_slices =\ + scenario_tree.scen_names_to_ranks(ranks_per_cylinder) + + self.cylinder_rank = mpicomm.Get_rank() // n_cylinders + #self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder + self.all_scenario_names = all_scenario_names + #taken from spbase + self.local_scenario_names = [ + all_scenario_names[i] for i in _rank_slices[self.cylinder_rank] + ] + for sname in self.local_scenario_names: + s = scenario_creator(sname, **scenario_creator_kwargs) + self.local_scenarios[sname] = s + #we are not collecting instantiation time + + self.consensus_vars = consensus_vars + self.verbose = verbose + self.consensus_vars_number = _consensus_vars_number_creator(consensus_vars) + #check_consensus_vars(consensus_vars) + self.assign_variable_probs(verbose=self.verbose) + self.number_of_scenario = len(all_scenario_names) + + def var_prob_list(self, s): + """Associates probabilities to variables and raises exceptions if the model doesn't match the dictionary consensus_vars + + Args: + s (Pyomo ConcreteModel): scenario + + Returns: + list: list of pairs (variable id (int), probability (float)). The order of the variables is the same across scenarios. + If the consensus variable is present in the scenario it is associated with probability 1/(number of subproblems + in which it appears). Otherwise it has probability 0.
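+ For example, in the 3-region problem the consensus variable y[DC1DC2] appears in Region1 and Region2, so it gets probability 1/2 in each; Region3 holds a fixed copy with probability 0.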
+ """ + return self.varprob_dict[s] + + def assign_variable_probs(self, verbose=False): + self.varprob_dict = {} + + #we collect the consensus variables + all_consensus_vars = {var_stage_tuple: None for admm_subproblem_names in self.consensus_vars for var_stage_tuple in self.consensus_vars[admm_subproblem_names]} + error_list1 = [] + error_list2 = [] + for sname,s in self.local_scenarios.items(): + if verbose: + print(f"AdmmWrapper.assign_variable_probs is processing scenario: {sname}") + varlist = list() + self.varprob_dict[s] = list() + for vstr in all_consensus_vars.keys(): + v = s.find_component(vstr) + if vstr in self.consensus_vars[sname]: + if v is not None: + #variables that should be on the model + self.varprob_dict[s].append((id(v),1/(self.consensus_vars_number[vstr]))) + else: + error_list1.append((sname,vstr)) + else: + if v is None: + # This var will not be indexed but that might not matter?? + # Going to replace the brackets + v2str = vstr.replace("[","__").replace("]","__") # To distinguish the consensus_vars fixed at 0 + v = pyo.Var() + + ### Lines equivalent to setattr(s, v2str, v) without warning + s.del_component(v2str) + s.add_component(v2str, v) + + v.fix(0) + self.varprob_dict[s].append((id(v),0)) + else: + error_list2.append((sname,vstr)) + if v is not None: #if None the error is trapped earlier + varlist.append(v) + objfunc = sputils.find_active_objective(s) + #this will overwrite the nonants already there + sputils.attach_root_node(s, objfunc, varlist) + + if len(error_list1) + len(error_list2) > 0: + raise RuntimeError (f"for each pair (scenario, variable) of the following list, the variable appears" + f"in consensus_vars, but not in the model:\n {error_list1} \n" + f"for each pair (scenario, variable) of the following list, the variable appears " + f"in the model, but not in consensus var: \n {error_list2}") + + + def admmWrapper_scenario_creator(self, sname): + #this is the function the user will supply for all cylinders + assert sname in self.local_scenario_names, f"{global_rank=} {sname=} \n {self.local_scenario_names=}" + #should probably be deleted as it takes time + scenario = self.local_scenarios[sname] + + # Grabs the objective function and multiplies its value by the number of scenarios to compensate for the probabilities + obj = sputils.find_active_objective(scenario) + obj.expr = obj.expr * self.number_of_scenario + + return scenario + \ No newline at end of file diff --git a/mpisppy/utils/cfg_vanilla.py b/mpisppy/utils/cfg_vanilla.py index cb88bf74d..8db9496bf 100644 --- a/mpisppy/utils/cfg_vanilla.py +++ b/mpisppy/utils/cfg_vanilla.py @@ -512,7 +512,8 @@ def xhatxbar_spoke( all_scenario_names, scenario_creator_kwargs=None, variable_probability=None, - ph_extensions=None + ph_extensions=None, + all_nodenames=None, ): xhatxbar_dict = _Xhat_Eval_spoke_foundation( XhatXbarInnerBound, @@ -522,6 +523,7 @@ def xhatxbar_spoke( all_scenario_names, scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, + all_nodenames=all_nodenames, ) xhatxbar_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat @@ -702,7 +704,8 @@ def ph_ob_spoke( all_scenario_names, scenario_creator_kwargs=None, rho_setter=None, - all_nodenames = None, + all_nodenames=None, + variable_probability=None, ): shoptions = shared_options(cfg) ph_ob_spoke = { @@ -715,7 +718,8 @@ def ph_ob_spoke( "scenario_creator_kwargs": scenario_creator_kwargs, 'scenario_denouement': scenario_denouement, "rho_setter": rho_setter, - "all_nodenames": all_nodenames + 
"all_nodenames": all_nodenames, + "variable_probability": variable_probability, } } if cfg.ph_ob_rho_rescale_factors_json is not None: diff --git a/setup.py b/setup.py index a53d05d0b..cf2bbe59a 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ packages=packages, python_requires='>=3.8', install_requires=[ - 'numpy', + 'numpy<2', 'scipy', 'pyomo>=6.4', ],